edited_code
stringlengths
17
978k
original_code
stringlengths
17
978k
#!/usr/bin/env python
"""Dump DVF Postgres tables to CSV files.

Reads a data-configuration JSON (``path_cfg``) and a credential JSON
(``path_sec``, auto-discovered under ``~/.cred/ofm/database/`` when not
given), then exports every table of the ``dvf`` and ``dvf_annexe``
schemas to ``<delivery>/csv/<schema.table>.csv``.

PostGIS geometry columns are converted to GeoJSON in EPSG:4326 because
BigQuery GIS does not accept EWKB (as of 2021).
"""
from pdb import set_trace as bp  # noqa: F401 -- kept for interactive debugging
import json
import os
import re
import sys
import glob

import pandas as pd
import typer
from typing import Optional
from sqlalchemy import create_engine
import psycopg2

debug = False
verbose = True
use_pandas = False  # pandas path loads whole tables in memory; COPY path streams

if use_pandas:
    print("WARNING: under developpement, this will probably fail because of memory limitation.")
if debug:
    limit = 10000  # row cap applied to every export query in debug mode
    verbose = True


def main(
    path_cfg: str = typer.Argument(..., help="Data configuration file."),
    path_sec: Optional[str] = typer.Option(None, "-sec", help="Path to PG configuration file.")
):
    """Export all dvf/dvf_annexe tables of the configured database to CSV."""
    with open(path_cfg) as f_cfg:
        cfg = json.load(f_cfg)
    if verbose:
        print(cfg)

    # Auto-discover the credential file from the database name prefix
    # when the caller did not pass one explicitly.
    if path_sec is None:
        pat = cfg['dbname'][0:15]
        lf = glob.glob(os.environ['HOME'] + '/.cred/ofm/database/' + pat + "*.json")
        if len(lf) > 1:
            print("ERROR: there is more than one credential file matching.")
            print(" Please use -sec to pass path_sec.")
            sys.exit(1)
        elif not lf:
            # Fix: the original indexed lf[0] unconditionally and crashed
            # with IndexError when no credential file matched.
            print("ERROR: no credential file matching.")
            print(" Please use -sec to pass path_sec.")
            sys.exit(1)
        else:
            path_sec = lf[0]

    with open(path_sec) as f_sec:
        sec = json.load(f_sec)
    if verbose:
        print(sec)

    # Guard against exporting from the wrong database.
    if not cfg['dbname'] == sec['POSTGRES_DBNAME']:
        typer.echo("ERROR: Something is wrong, database missmatch")
        typer.echo(cfg['dbname'])
        typer.echo(sec['POSTGRES_DBNAME'])
        raise typer.Exit(1)

    # Fix: cfg["delivery"] inside a double-quoted f-string is a
    # SyntaxError on Python < 3.12 -- use single quotes for the key.
    output_dir = f"{cfg['delivery']}/csv"
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)

    # SQLAlchemy connection URL built from the credential file.
    postgres_secret = (
        'postgresql://{username}:{password}@{ipaddress}:{port}/{dbname}'.format(
            username=sec['POSTGRES_USERNAME'],
            password=sec['POSTGRES_PASSWORD'],
            ipaddress=sec['POSTGRES_ADDRESS'],
            port=sec['POSTGRES_PORT'],
            dbname=sec['POSTGRES_DBNAME']))

    # Create the engine
    engine = create_engine(postgres_secret)

    # List user tables, then keep only the dvf / dvf_annexe schemas.
    df = pd.read_sql_query('''
    SELECT * FROM pg_catalog.pg_tables
    WHERE schemaname != 'pg_catalog'
    AND schemaname != 'information_schema';''', engine)

    mask_dvf = (df.schemaname == 'dvf') | (df.schemaname == 'dvf_annexe')
    tablenames = (df[mask_dvf].schemaname + '.'
                  + df[mask_dvf].tablename).tolist()
    del df

    # Raw psycopg2 connection for the streaming COPY-based export.
    conn = psycopg2.connect(host=sec['POSTGRES_ADDRESS'],
                            port=sec['POSTGRES_PORT'],
                            dbname=sec['POSTGRES_DBNAME'],
                            user=sec['POSTGRES_USERNAME'],
                            password=sec['POSTGRES_PASSWORD'])
    cur = conn.cursor()

    def read_write_table(tablename, conn, cur):
        """Stream one table to CSV with COPY, geometry columns as GeoJSON."""
        # Because BigQuery GIS do not support EWKB (2021).
        df = pd.read_sql(f"select * from {tablename} LIMIT 1", conn)
        lcol = df.columns.tolist()
        for i, col in enumerate(lcol):
            if re.match(r"^geom.*$", col):
                lcol[i] = f"ST_AsGeoJSON(ST_Transform({col}, 4326)) as {col}"
        lcol = ", ".join(lcol)
        if debug:
            subquery = f"SELECT {lcol} FROM {tablename} LIMIT {limit}"
        else:
            subquery = f"SELECT {lcol} FROM {tablename}"
        # Use the COPY function on the SQL we created above.
        query = "COPY ({0}) TO STDOUT WITH CSV HEADER".format(subquery)
        opath = output_dir + os.sep + tablename + ".csv"
        try:
            with open(opath, 'w') as f_output:
                if verbose:
                    print("dump with query:")
                    print(query)
                cur.copy_expert(query, f_output)
                if verbose:
                    print("finish")
        except psycopg2.Error as e:
            # Surface the failing query and target path, then re-raise.
            t_message = ("\nERROR: \n" + str(e)
                         + "\nsubquery: " + subquery
                         + "\nopath: " + opath + "\n\n")
            print(t_message)
            raise

    def read_write_table_pd(tablename):
        """In-memory export of one table via pandas (experimental path)."""
        if debug:
            df = pd.read_sql_query(
                f'''SELECT * FROM {tablename} LIMIT {limit}''', engine)
        else:
            df = pd.read_sql_query(
                f'''SELECT * FROM {tablename}''', engine)
        if verbose:
            print(df)
        opath = output_dir + os.sep + tablename + ".csv"
        if verbose:
            print(opath)
        if os.path.exists(opath):
            typer.echo(f"Skipping file {opath}")
        else:
            if verbose:
                print("Writting table")
            df.to_csv(opath, index=False)
        del df

    for it, tablename in enumerate(tablenames):
        if verbose:
            print(it + 1, tablename)
            print("Loading table")
        if use_pandas:
            read_write_table_pd(tablename)
        else:
            read_write_table(tablename, conn, cur)

    # Clean up: Close the database cursor and connection
    if not use_pandas:
        cur.close()
        conn.close()


if __name__ == "__main__":
    typer.run(main)
#!/usr/bin/env python
"""Dump DVF Postgres tables to CSV files.

Reads a data-configuration JSON (``path_cfg``) and a credential JSON
(``path_sec``, auto-discovered under ``~/.cred/ofm/database/`` when not
given), then exports every table of the ``dvf`` and ``dvf_annexe``
schemas to ``<delivery>/csv/<schema.table>.csv``.

PostGIS geometry columns are converted to GeoJSON in EPSG:4326 because
BigQuery GIS does not accept EWKB (as of 2021).
"""
from pdb import set_trace as bp
import json
import os
import re
import sys
import glob
import pandas as pd
import typer
from typing import Optional
from sqlalchemy import create_engine
import psycopg2

debug = False
verbose = True
# pandas path loads each full table into memory; the default COPY path streams.
use_pandas = False

if use_pandas:
    print("WARNING: under developpement, this will probably fail because of memory limitation.")
if debug:
    limit = 10000  # row cap applied to every export query in debug mode
    verbose = True


def main(
    path_cfg: str = typer.Argument(..., help="Data configuration file."),
    path_sec: Optional[str] = typer.Option(None, "-sec", help="Path to PG configuration file.")
):
    """Export all dvf/dvf_annexe tables of the configured database to CSV."""
    with open(path_cfg) as f_cfg:
        cfg = json.load(f_cfg)
    if verbose:
        print(cfg)

    # Auto-discover the credential file from the database-name prefix when
    # the caller did not pass one explicitly.
    # NOTE(review): if no file matches, lf[0] below raises IndexError --
    # only the "more than one match" case is handled.
    if path_sec is None:
        pat = cfg['dbname'][0:15]
        lf = glob.glob(
            os.environ['HOME'] + '/.cred/ofm/database/' + pat + "*.json"
        )
        if len(lf) > 1:
            print("ERROR: there is more than one credential file matching.")
            print(" Please use -sec to pass path_sec.")
            sys.exit(1)
        else:
            path_sec = lf[0]

    with open(path_sec) as f_sec:
        sec = json.load(f_sec)
    if verbose:
        print(sec)

    # Guard against exporting from the wrong database.
    if not cfg['dbname'] == sec['POSTGRES_DBNAME']:
        typer.echo("ERROR: Something is wrong, database missmatch")
        typer.echo(cfg['dbname'])
        typer.echo(sec['POSTGRES_DBNAME'])
        raise typer.Exit(1)

    output_dir = f"{cfg['delivery']}/csv"
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)

    # A long string that contains the necessary Postgres login information
    postgres_secret = ('postgresql://{username}:{password}@{ipaddress}:{port}/{dbname}'.format(
        username=sec['POSTGRES_USERNAME'],
        password=sec['POSTGRES_PASSWORD'],
        ipaddress=sec['POSTGRES_ADDRESS'],
        port=sec['POSTGRES_PORT'],
        dbname=sec['POSTGRES_DBNAME']))

    # Create the engine
    engine = create_engine(postgres_secret)

    # List user tables, then keep only the dvf / dvf_annexe schemas.
    df = pd.read_sql_query('''
    SELECT * FROM pg_catalog.pg_tables
    WHERE schemaname != 'pg_catalog'
    AND schemaname != 'information_schema';''', engine)

    mask_dvf = (df.schemaname == 'dvf') | (df.schemaname == 'dvf_annexe')
    tablenames = (df[mask_dvf].schemaname + '.'
                  + df[mask_dvf].tablename).tolist()
    del df

    # Raw psycopg2 connection for the streaming COPY-based export.
    conn = psycopg2.connect(host=sec['POSTGRES_ADDRESS'],
                            port=sec['POSTGRES_PORT'],
                            dbname=sec['POSTGRES_DBNAME'],
                            user=sec['POSTGRES_USERNAME'],
                            password=sec['POSTGRES_PASSWORD'])
    cur = conn.cursor()

    def read_write_table(tablename, conn, cur):
        """Stream one table to CSV with COPY, geometry columns as GeoJSON."""
        # Because BigQuery GIS do not support EWKB (2021).
        # Fetch a single row only to learn the column names.
        df = pd.read_sql(f"select * from {tablename} LIMIT 1", conn)
        lcol = df.columns.tolist()
        for i, col in enumerate(lcol):
            if re.match(r"^geom.*$", col):
                #lcol[i] = f"ST_AsText(ST_Transform({col}, 4326)) as {col}"
                # If there is an importation problem try, GeoJSON
                #if col == 'geompar':
                #    lcol[i] = f"REPLACE(ST_AsGeoJSON(ST_Transform({col}, 4326)), '] ], [ [', '] ] ], [ [ [') as {col}"
                #    lcol[i] = f"ST_AsGeoJSON(ST_MakeValid(ST_Transform({col}, 4326))) as {col}"
                #else:
                lcol[i] = f"ST_AsGeoJSON(ST_Transform({col}, 4326)) as {col}"
        lcol = ", ".join(lcol)
        if debug:
            subquery = f"SELECT {lcol} FROM {tablename} LIMIT {limit}"
        else:
            subquery = f"SELECT {lcol} FROM {tablename}"
        # Use the COPY function on the SQL we created above.
        query = "COPY ({0}) TO STDOUT WITH CSV HEADER".format(subquery)
        # Set up a variable to store our file path and name.
        opath = output_dir + os.sep + tablename + ".csv"
        try:
            with open(opath, 'w') as f_output:
                if verbose:
                    print("dump with query:")
                    print(query)
                cur.copy_expert(query, f_output)
                if verbose:
                    print("finish")
        except psycopg2.Error as e:
            # Surface the failing query and target path, then re-raise.
            t_message = "\nERROR: \n" + str(e) + \
                "\nsubquery: " + \
                subquery + \
                "\nopath: " + opath + "\n\n"
            print(t_message)
            raise

    def read_write_table_pd(tablename):
        """In-memory export of one table via pandas (experimental path).

        Unlike read_write_table, skips files that already exist.
        """
        if debug:
            df = pd.read_sql_query(
                f'''SELECT * FROM {tablename} LIMIT {limit}''', engine
            )
        else:
            df = pd.read_sql_query(
                f'''SELECT * FROM {tablename}''', engine
            )
        if verbose:
            print(df)
        opath = output_dir + os.sep + tablename + ".csv"
        if verbose:
            print(opath)
        if os.path.exists(opath):
            typer.echo(f"Skipping file {opath}")
        else:
            if verbose:
                print("Writting table")
            df.to_csv(opath, index=False)
        del df

    for it, tablename in enumerate(tablenames):
        if verbose:
            print(it + 1, tablename)
            print("Loading table")
        if use_pandas:
            read_write_table_pd(tablename)
        else:
            read_write_table(tablename, conn, cur)

    # Clean up: Close the database cursor and connection
    if not use_pandas:
        cur.close()
        conn.close()


if __name__ == "__main__":
    typer.run(main)
# Copyright 2015-2016 The Meson development team # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. '''This module provides helper functions for Gnome/GLib related functionality such as gobject-introspection, gresources and gtk-doc''' import copy import functools import os import subprocess import textwrap import typing as T from . import ExtensionModule from . import GResourceTarget, GResourceHeaderTarget, GirTarget, TypelibTarget, VapiTarget from . import ModuleReturnValue from .. import build from .. import interpreter from .. import mesonlib from .. import mlog from ..build import CustomTarget, CustomTargetIndex, GeneratedList, InvalidArguments from ..dependencies import Dependency, PkgConfigDependency, InternalDependency from ..interpreter.type_checking import DEPEND_FILES_KW, INSTALL_KW, NoneType, in_set_validator from ..interpreterbase import noPosargs, noKwargs, permittedKwargs, FeatureNew, FeatureDeprecatedKwargs from ..interpreterbase import typed_kwargs, KwargInfo, ContainerTypeInfo, FeatureDeprecated from ..interpreterbase.decorators import typed_pos_args from ..mesonlib import ( MachineChoice, MesonException, OrderedSet, Popen_safe, join_args, ) from ..programs import ExternalProgram, OverrideProgram if T.TYPE_CHECKING: from typing_extensions import Literal, TypedDict from . 
import ModuleState from ..compilers import Compiler from ..interpreter import Interpreter from ..interpreterbase import TYPE_var, TYPE_kwargs from ..mesonlib import FileOrString class PostInstall(TypedDict): glib_compile_schemas: bool gio_querymodules: T.List[str] gtk_update_icon_cache: bool update_desktop_database: bool class CompileSchemas(TypedDict): build_by_default: bool depend_files: T.List[FileOrString] class Yelp(TypedDict): languages: T.List[str] media: T.List[str] sources: T.List[str] symlink_media: bool class CompileResources(TypedDict): build_by_default: bool c_name: T.Optional[str] dependencies: T.List[T.Union[mesonlib.File, build.CustomTarget, build.CustomTargetIndex]] export: bool extra_args: T.List[str] gresource_bundle: bool install: bool install_dir: T.Optional[str] install_header: bool source_dir: T.List[str] class GenerateGir(TypedDict): build_by_default: bool dependencies: T.List[Dependency] export_packages: T.List[str] extra_args: T.List[str] fatal_warnings: bool header: T.List[str] identifier_prefix: T.List[str] include_directories: T.List[T.Union[build.IncludeDirs, str]] includes: T.List[T.Union[str, GirTarget]] install: bool install_dir_gir: T.Optional[str] install_dir_typelib: T.Optional[str] link_with: T.List[T.Union[build.SharedLibrary, build.StaticLibrary]] namespace: str nsversion: str sources: T.List[T.Union[FileOrString, build.GeneratedTypes]] symbol_prefix: T.List[str] class GtkDoc(TypedDict): src_dir: T.List[T.Union[str, build.IncludeDirs]] main_sgml: str main_xml: str module_version: str namespace: str mode: Literal['xml', 'smgl', 'auto', 'none'] html_args: T.List[str] scan_args: T.List[str] scanobjs_args: T.List[str] fixxref_args: T.List[str] mkdb_args: T.List[str] content_files: T.List[T.Union[build.GeneratedTypes, FileOrString]] ignore_headers: T.List[str] install_dir: T.List[str] check: bool install: bool gobject_typesfile: T.List[str] html_assets: T.List[str] expand_content_files: T.List[str] c_args: T.List[str] 
include_directories: T.List[T.Union[str, build.IncludeDirs]] dependencies: T.List[T.Union[Dependency, build.SharedLibrary, build.StaticLibrary]] class GdbusCodegen(TypedDict): sources: T.List[FileOrString] extra_args: T.List[str] interface_prefix: T.Optional[str] namespace: T.Optional[str] object_manager: bool build_by_default: bool annotations: T.List[str] install_header: bool install_dir: T.Optional[str] docbook: T.Optional[str] autocleanup: Literal['all', 'none', 'objects', 'default'] # Differs from the CustomTarget version in that it straight defaults to True _BUILD_BY_DEFAULT: KwargInfo[bool] = KwargInfo( 'build_by_default', bool, default=True, ) _EXTRA_ARGS_KW: KwargInfo[T.List[str]] = KwargInfo( 'extra_args', ContainerTypeInfo(list, str), default=[], listify=True, ) # gresource compilation is broken due to the way # the resource compiler and Ninja clash about it # # https://github.com/ninja-build/ninja/issues/1184 # https://bugzilla.gnome.org/show_bug.cgi?id=774368 gresource_dep_needed_version = '>= 2.51.1' native_glib_version = None class GnomeModule(ExtensionModule): def __init__(self, interpreter: 'Interpreter') -> None: super().__init__(interpreter) self.gir_dep = None self.install_glib_compile_schemas = False self.install_gio_querymodules = [] self.install_gtk_update_icon_cache = False self.install_update_desktop_database = False self.devenv = None self.methods.update({ 'post_install': self.post_install, 'compile_resources': self.compile_resources, 'generate_gir': self.generate_gir, 'compile_schemas': self.compile_schemas, 'yelp': self.yelp, 'gtkdoc': self.gtkdoc, 'gtkdoc_html_dir': self.gtkdoc_html_dir, 'gdbus_codegen': self.gdbus_codegen, 'mkenums': self.mkenums, 'mkenums_simple': self.mkenums_simple, 'genmarshal': self.genmarshal, 'generate_vapi': self.generate_vapi, }) @staticmethod def _get_native_glib_version(state: 'ModuleState') -> str: global native_glib_version if native_glib_version is None: glib_dep = PkgConfigDependency('glib-2.0', 
state.environment, {'native': True, 'required': False}) if glib_dep.found(): native_glib_version = glib_dep.get_version() else: mlog.warning('Could not detect glib version, assuming 2.54. ' 'You may get build errors if your glib is older.') native_glib_version = '2.54' return native_glib_version @mesonlib.run_once def __print_gresources_warning(self, state: 'ModuleState') -> None: if not mesonlib.version_compare(self._get_native_glib_version(state), gresource_dep_needed_version): mlog.warning('GLib compiled dependencies do not work reliably with \n' 'the current version of GLib. See the following upstream issue:', mlog.bold('https://bugzilla.gnome.org/show_bug.cgi?id=774368')) @staticmethod def _print_gdbus_warning() -> None: mlog.warning('Code generated with gdbus_codegen() requires the root directory be added to\n' ' include_directories of targets with GLib < 2.51.3:', mlog.bold('https://github.com/mesonbuild/meson/issues/1387'), once=True) def _get_dep(self, state: 'ModuleState', depname: str, native: bool = False, required: bool = True) -> Dependency: kwargs = {'native': native, 'required': required} return self.interpreter.func_dependency(state.current_node, [depname], kwargs) def _get_native_binary(self, state: 'ModuleState', name: str, depname: str, varname: str, required: bool = True) -> T.Union[ExternalProgram, OverrideProgram, 'build.Executable']: # Look in overrides in case glib/gtk/etc are built as subproject prog = self.interpreter.program_from_overrides([name], []) if prog is not None: return prog # Look in machine file prog_list = state.environment.lookup_binary_entry(MachineChoice.HOST, name) if prog_list is not None: return ExternalProgram.from_entry(name, prog_list) # Check if pkgconfig has a variable dep = self._get_dep(state, depname, native=True, required=False) if dep.found() and dep.type_name == 'pkgconfig': value = dep.get_pkgconfig_variable(varname, {}) if value: return ExternalProgram(name, [value]) # Normal program lookup return 
state.find_program(name, required=required) @typed_kwargs('gnome.post_install', KwargInfo('glib_compile_schemas', bool, default=False), KwargInfo('gio_querymodules', ContainerTypeInfo(list, str), default=[], listify=True), KwargInfo('gtk_update_icon_cache', bool, default=False), KwargInfo('update_desktop_database', bool, default=False, since='0.59.0'), ) @noPosargs @FeatureNew('gnome.post_install', '0.57.0') def post_install(self, state: 'ModuleState', args: T.List['TYPE_var'], kwargs: 'PostInstall') -> ModuleReturnValue: rv: T.List['build.ExecutableSerialisation'] = [] datadir_abs = os.path.join(state.environment.get_prefix(), state.environment.get_datadir()) if kwargs['glib_compile_schemas'] and not self.install_glib_compile_schemas: self.install_glib_compile_schemas = True prog = self._get_native_binary(state, 'glib-compile-schemas', 'gio-2.0', 'glib_compile_schemas') schemasdir = os.path.join(datadir_abs, 'glib-2.0', 'schemas') script = state.backend.get_executable_serialisation([prog, schemasdir]) script.skip_if_destdir = True rv.append(script) for d in kwargs['gio_querymodules']: if d not in self.install_gio_querymodules: self.install_gio_querymodules.append(d) prog = self._get_native_binary(state, 'gio-querymodules', 'gio-2.0', 'gio_querymodules') moduledir = os.path.join(state.environment.get_prefix(), d) script = state.backend.get_executable_serialisation([prog, moduledir]) script.skip_if_destdir = True rv.append(script) if kwargs['gtk_update_icon_cache'] and not self.install_gtk_update_icon_cache: self.install_gtk_update_icon_cache = True prog = self._get_native_binary(state, 'gtk4-update-icon-cache', 'gtk-4.0', 'gtk4_update_icon_cache', required=False) found = isinstance(prog, build.Executable) or prog.found() if not found: prog = self._get_native_binary(state, 'gtk-update-icon-cache', 'gtk+-3.0', 'gtk_update_icon_cache') icondir = os.path.join(datadir_abs, 'icons', 'hicolor') script = state.backend.get_executable_serialisation([prog, '-q', '-t', '-f', 
icondir]) script.skip_if_destdir = True rv.append(script) if kwargs['update_desktop_database'] and not self.install_update_desktop_database: self.install_update_desktop_database = True prog = self._get_native_binary(state, 'update-desktop-database', 'desktop-file-utils', 'update_desktop_database') appdir = os.path.join(datadir_abs, 'applications') script = state.backend.get_executable_serialisation([prog, '-q', appdir]) script.skip_if_destdir = True rv.append(script) return ModuleReturnValue(None, rv) @typed_pos_args('gnome.compile_resources', str, (str, mesonlib.File)) @typed_kwargs( 'gnome.compile_resources', _BUILD_BY_DEFAULT, _EXTRA_ARGS_KW, INSTALL_KW, INSTALL_KW.evolve(name='install_header', since='0.37.0'), KwargInfo('c_name', (str, NoneType)), KwargInfo('dependencies', ContainerTypeInfo(list, (mesonlib.File, build.CustomTarget, build.CustomTargetIndex)), default=[], listify=True), KwargInfo('export', bool, default=False, since='0.37.0'), KwargInfo('gresource_bundle', bool, default=False, since='0.37.0'), KwargInfo('install_dir', (str, NoneType)), KwargInfo('source_dir', ContainerTypeInfo(list, str), default=[], listify=True), ) def compile_resources(self, state: 'ModuleState', args: T.Tuple[str, 'FileOrString'], kwargs: 'CompileResources') -> 'ModuleReturnValue': self.__print_gresources_warning(state) glib_version = self._get_native_glib_version(state) glib_compile_resources = state.find_program('glib-compile-resources') cmd = [glib_compile_resources, '@INPUT@'] source_dirs = kwargs['source_dir'] dependencies = kwargs['dependencies'] target_name, input_file = args # Validate dependencies subdirs: T.List[str] = [] depends: T.List[T.Union[build.CustomTarget, build.CustomTargetIndex]] = [] for dep in dependencies: if isinstance(dep, mesonlib.File): subdirs.append(dep.subdir) else: depends.append(dep) subdirs.append(dep.get_subdir()) if not mesonlib.version_compare(glib_version, gresource_dep_needed_version): m = 'The "dependencies" argument of 
gnome.compile_resources() can not\n' \ 'be used with the current version of glib-compile-resources due to\n' \ '<https://bugzilla.gnome.org/show_bug.cgi?id=774368>' raise MesonException(m) if not mesonlib.version_compare(glib_version, gresource_dep_needed_version): # Resource xml files generated at build-time cannot be used with # gnome.compile_resources() because we need to scan the xml for # dependencies. Use configure_file() instead to generate it at # configure-time if isinstance(input_file, mesonlib.File): # glib-compile-resources will be run inside the source dir, # so we need either 'src_to_build' or the absolute path. # Absolute path is the easiest choice. if input_file.is_built: ifile = os.path.join(state.environment.get_build_dir(), input_file.subdir, input_file.fname) else: ifile = os.path.join(input_file.subdir, input_file.fname) else: ifile = os.path.join(state.subdir, input_file) depend_files, depends, subdirs = self._get_gresource_dependencies( state, ifile, source_dirs, dependencies) # Make source dirs relative to build dir now source_dirs = [os.path.join(state.build_to_src, state.subdir, d) for d in source_dirs] # Ensure build directories of generated deps are included source_dirs += subdirs # Always include current directory, but after paths set by user source_dirs.append(os.path.join(state.build_to_src, state.subdir)) for source_dir in OrderedSet(source_dirs): cmd += ['--sourcedir', source_dir] if kwargs['c_name']: cmd += ['--c-name', kwargs['c_name']] if not kwargs['export']: cmd += ['--internal'] cmd += ['--generate', '--target', '@OUTPUT@'] cmd += kwargs['extra_args'] gresource = kwargs['gresource_bundle'] if gresource: output = f'{target_name}.gresource' name = f'{target_name}_gresource' else: if 'c' in state.environment.coredata.compilers.host: output = f'{target_name}.c' name = f'{target_name}_c' elif 'cpp' in state.environment.coredata.compilers.host: output = f'{target_name}.cpp' name = f'{target_name}_cpp' else: raise 
MesonException('Compiling GResources into code is only supported in C and C++ projects') if kwargs['install'] and not gresource: raise MesonException('The install kwarg only applies to gresource bundles, see install_header') install_header = kwargs['install_header'] if install_header and gresource: raise MesonException('The install_header kwarg does not apply to gresource bundles') if install_header and not kwargs['export']: raise MesonException('GResource header is installed yet export is not enabled') c_kwargs: T.Dict[str, T.Any] = { 'build_by_default': kwargs['build_by_default'], 'depends': depends, 'input': input_file, 'install': kwargs['install'], 'install_dir': kwargs['install_dir'] or [], 'output': output, } if not mesonlib.version_compare(glib_version, gresource_dep_needed_version): # This will eventually go out of sync if dependencies are added c_kwargs['depend_files'] = depend_files c_kwargs['command'] = cmd else: depfile = f'{output}.d' c_kwargs['depfile'] = depfile c_kwargs['command'] = copy.copy(cmd) + ['--dependency-file', '@DEPFILE@'] target_c = GResourceTarget(name, state.subdir, state.subproject, c_kwargs) if gresource: # Only one target for .gresource files return ModuleReturnValue(target_c, [target_c]) h_kwargs: T.Dict[str, T.Any] = { 'command': cmd, 'input': input_file, 'output': f'{target_name}.h', # The header doesn't actually care about the files yet it errors if missing 'depends': depends, 'build_by_default': kwargs['build_by_default'], 'install_dir': kwargs['install_dir'] or [state.environment.coredata.get_option(mesonlib.OptionKey('includedir'))], } if install_header: h_kwargs['install'] = install_header target_h = GResourceHeaderTarget(f'{target_name}_h', state.subdir, state.subproject, h_kwargs) rv = [target_c, target_h] return ModuleReturnValue(rv, rv) def _get_gresource_dependencies( self, state: 'ModuleState', input_file: str, source_dirs: T.List[str], dependencies: T.Sequence[T.Union[mesonlib.File, build.CustomTarget, 
build.CustomTargetIndex]] ) -> T.Tuple[T.List[mesonlib.FileOrString], T.List[T.Union[build.CustomTarget, build.CustomTargetIndex]], T.List[str]]: cmd = ['glib-compile-resources', input_file, '--generate-dependencies'] # Prefer generated files over source files cmd += ['--sourcedir', state.subdir] # Current build dir for source_dir in source_dirs: cmd += ['--sourcedir', os.path.join(state.subdir, source_dir)] try: pc, stdout, stderr = Popen_safe(cmd, cwd=state.environment.get_source_dir()) except (FileNotFoundError, PermissionError): raise MesonException('Could not execute glib-compile-resources.') if pc.returncode != 0: m = f'glib-compile-resources failed to get dependencies for {cmd[1]}:\n{stderr}' mlog.warning(m) raise subprocess.CalledProcessError(pc.returncode, cmd) raw_dep_files: T.List[str] = stdout.split('\n')[:-1] depends: T.List[T.Union[build.CustomTarget, build.CustomTargetIndex]] = [] subdirs: T.List[str] = [] dep_files: T.List[mesonlib.FileOrString] = [] for resfile in raw_dep_files.copy(): resbasename = os.path.basename(resfile) for dep in dependencies: if isinstance(dep, mesonlib.File): if dep.fname != resbasename: continue raw_dep_files.remove(resfile) dep_files.append(dep) subdirs.append(dep.subdir) break elif isinstance(dep, (build.CustomTarget, build.CustomTargetIndex)): fname = None outputs = {(o, os.path.basename(o)) for o in dep.get_outputs()} for o, baseo in outputs: if baseo == resbasename: fname = o break if fname is not None: raw_dep_files.remove(resfile) depends.append(dep) subdirs.append(dep.get_subdir()) break else: # In generate-dependencies mode, glib-compile-resources doesn't raise # an error for missing resources but instead prints whatever filename # was listed in the input file. That's good because it means we can # handle resource files that get generated as part of the build, as # follows. # # If there are multiple generated resource files with the same basename # then this code will get confused. 
try: f = mesonlib.File.from_source_file(state.environment.get_source_dir(), ".", resfile) except MesonException: raise MesonException( f'Resource "{resfile}" listed in "{input_file}" was not found. ' 'If this is a generated file, pass the target that generates ' 'it to gnome.compile_resources() using the "dependencies" ' 'keyword argument.') raw_dep_files.remove(resfile) dep_files.append(f) dep_files.extend(raw_dep_files) return dep_files, depends, subdirs def _get_link_args(self, state: 'ModuleState', lib: T.Union[build.SharedLibrary, build.StaticLibrary], depends: T.List[build.BuildTarget], include_rpath: bool = False, use_gir_args: bool = False) -> T.List[str]: link_command: T.List[str] = [] # Construct link args if isinstance(lib, build.SharedLibrary): libdir = os.path.join(state.environment.get_build_dir(), state.backend.get_target_dir(lib)) link_command.append('-L' + libdir) if include_rpath: link_command.append('-Wl,-rpath,' + libdir) depends.append(lib) # Needed for the following binutils bug: # https://github.com/mesonbuild/meson/issues/1911 # However, g-ir-scanner does not understand -Wl,-rpath # so we need to use -L instead for d in state.backend.determine_rpath_dirs(lib): d = os.path.join(state.environment.get_build_dir(), d) link_command.append('-L' + d) if include_rpath: link_command.append('-Wl,-rpath,' + d) if use_gir_args and self._gir_has_option('--extra-library'): link_command.append('--extra-library=' + lib.name) else: link_command.append('-l' + lib.name) return link_command def _get_dependencies_flags( self, deps: T.Sequence[T.Union['Dependency', build.SharedLibrary, build.StaticLibrary]], state: 'ModuleState', depends: T.List[build.BuildTarget], include_rpath: bool = False, use_gir_args: bool = False, separate_nodedup: bool = False ) -> T.Tuple[OrderedSet[str], OrderedSet[str], OrderedSet[str], T.Optional[T.List[str]], OrderedSet[str]]: cflags: OrderedSet[str] = OrderedSet() internal_ldflags: OrderedSet[str] = OrderedSet() external_ldflags: 
OrderedSet[str] = OrderedSet() # External linker flags that can't be de-duped reliably because they # require two args in order, such as -framework AVFoundation external_ldflags_nodedup: T.List[str] = [] gi_includes: OrderedSet[str] = OrderedSet() deps = mesonlib.listify(deps) for dep in deps: if isinstance(dep, Dependency): girdir = dep.get_variable(pkgconfig='girdir', internal='girdir', default_value='') if girdir: assert isinstance(girdir, str), 'for mypy' gi_includes.update([girdir]) if isinstance(dep, InternalDependency): cflags.update(dep.get_compile_args()) cflags.update(state.get_include_args(dep.include_directories)) for lib in dep.libraries: if isinstance(lib, build.SharedLibrary): internal_ldflags.update(self._get_link_args(state, lib, depends, include_rpath)) libdepflags = self._get_dependencies_flags(lib.get_external_deps(), state, depends, include_rpath, use_gir_args, True) cflags.update(libdepflags[0]) internal_ldflags.update(libdepflags[1]) external_ldflags.update(libdepflags[2]) external_ldflags_nodedup += libdepflags[3] gi_includes.update(libdepflags[4]) extdepflags = self._get_dependencies_flags(dep.ext_deps, state, depends, include_rpath, use_gir_args, True) cflags.update(extdepflags[0]) internal_ldflags.update(extdepflags[1]) external_ldflags.update(extdepflags[2]) external_ldflags_nodedup += extdepflags[3] gi_includes.update(extdepflags[4]) for source in dep.sources: if isinstance(source, GirTarget): gi_includes.update([os.path.join(state.environment.get_build_dir(), source.get_subdir())]) # This should be any dependency other than an internal one. 
elif isinstance(dep, Dependency): cflags.update(dep.get_compile_args()) ldflags = iter(dep.get_link_args(raw=True)) for flag in ldflags: if (os.path.isabs(flag) and # For PkgConfigDependency only: getattr(dep, 'is_libtool', False)): lib_dir = os.path.dirname(flag) external_ldflags.update([f'-L{lib_dir}']) if include_rpath: external_ldflags.update([f'-Wl,-rpath {lib_dir}']) libname = os.path.basename(flag) if libname.startswith("lib"): libname = libname[3:] libname = libname.split(".so")[0] flag = f"-l{libname}" # FIXME: Hack to avoid passing some compiler options in if flag.startswith("-W"): continue # If it's a framework arg, slurp the framework name too # to preserve the order of arguments if flag == '-framework': external_ldflags_nodedup += [flag, next(ldflags)] else: external_ldflags.update([flag]) elif isinstance(dep, (build.StaticLibrary, build.SharedLibrary)): cflags.update(state.get_include_args(dep.get_include_dirs())) depends.append(dep) else: mlog.log(f'dependency {dep!r} not handled to build gir files') continue if use_gir_args and self._gir_has_option('--extra-library'): def fix_ldflags(ldflags: T.Iterable[str]) -> OrderedSet[str]: fixed_ldflags: OrderedSet[str] = OrderedSet() for ldflag in ldflags: if ldflag.startswith("-l"): ldflag = ldflag.replace('-l', '--extra-library=', 1) fixed_ldflags.add(ldflag) return fixed_ldflags internal_ldflags = fix_ldflags(internal_ldflags) external_ldflags = fix_ldflags(external_ldflags) if not separate_nodedup: external_ldflags.update(external_ldflags_nodedup) return cflags, internal_ldflags, external_ldflags, None, gi_includes else: return cflags, internal_ldflags, external_ldflags, external_ldflags_nodedup, gi_includes def _unwrap_gir_target(self, girtarget: T.Union[build.Executable, build.StaticLibrary, build.SharedLibrary], state: 'ModuleState' ) -> T.Union[build.Executable, build.StaticLibrary, build.SharedLibrary]: if not isinstance(girtarget, (build.Executable, build.SharedLibrary, build.StaticLibrary)): raise 
MesonException(f'Gir target must be an executable or library but is "{girtarget}" of type {type(girtarget).__name__}') STATIC_BUILD_REQUIRED_VERSION = ">=1.58.1" if isinstance(girtarget, (build.StaticLibrary)) and \ not mesonlib.version_compare( self._get_gir_dep(state)[0].get_version(), STATIC_BUILD_REQUIRED_VERSION): raise MesonException('Static libraries can only be introspected with GObject-Introspection ' + STATIC_BUILD_REQUIRED_VERSION) return girtarget def _devenv_prepend(self, varname: str, value: str) -> None: if self.devenv is None: self.devenv = build.EnvironmentVariables() self.interpreter.build.devenv.append(self.devenv) self.devenv.prepend(varname, [value]) def _get_gir_dep(self, state: 'ModuleState') -> T.Tuple[Dependency, T.Union[build.Executable, 'ExternalProgram', 'OverrideProgram'], T.Union[build.Executable, 'ExternalProgram', 'OverrideProgram']]: if not self.gir_dep: self.gir_dep = self._get_dep(state, 'gobject-introspection-1.0') self.giscanner = self._get_native_binary(state, 'g-ir-scanner', 'gobject-introspection-1.0', 'g_ir_scanner') self.gicompiler = self._get_native_binary(state, 'g-ir-compiler', 'gobject-introspection-1.0', 'g_ir_compiler') return self.gir_dep, self.giscanner, self.gicompiler @functools.lru_cache(maxsize=None) def _gir_has_option(self, option: str) -> bool: exe = self.giscanner if isinstance(exe, OverrideProgram): # Handle overridden g-ir-scanner assert option in {'--extra-library', '--sources-top-dirs'} return True p, o, _ = Popen_safe(exe.get_command() + ['--help'], stderr=subprocess.STDOUT) return p.returncode == 0 and option in o # May mutate depends and gir_inc_dirs def _scan_include(self, state: 'ModuleState', includes: T.List[T.Union[str, GirTarget]] ) -> T.Tuple[T.List[str], T.List[str], T.List[GirTarget]]: ret: T.List[str] = [] gir_inc_dirs: T.List[str] = [] depends: T.List[GirTarget] = [] for inc in includes: if isinstance(inc, str): ret += [f'--include={inc}'] elif isinstance(inc, GirTarget): gir_inc_dirs 
.append(os.path.join(state.environment.get_build_dir(), inc.get_subdir())) ret.append(f"--include-uninstalled={os.path.join(inc.get_subdir(), inc.get_basename())}") depends.append(inc) return ret, gir_inc_dirs, depends def _scan_langs(self, state: 'ModuleState', langs: T.Iterable[str]) -> T.List[str]: ret: T.List[str] = [] for lang in langs: link_args = state.environment.coredata.get_external_link_args(MachineChoice.HOST, lang) for link_arg in link_args: if link_arg.startswith('-L'): ret.append(link_arg) return ret def _scan_gir_targets(self, state: 'ModuleState', girtargets: T.List[build.BuildTarget]) -> T.List[T.Union[str, build.Executable]]: ret: T.List[T.Union[str, build.Executable]] = [] for girtarget in girtargets: if isinstance(girtarget, build.Executable): ret += ['--program', girtarget] else: # Because of https://gitlab.gnome.org/GNOME/gobject-introspection/merge_requests/72 # we can't use the full path until this is merged. libpath = os.path.join(girtarget.get_subdir(), girtarget.get_filename()) # Must use absolute paths here because g-ir-scanner will not # add them to the runtime path list if they're relative. This # means we cannot use @BUILD_ROOT@ build_root = state.environment.get_build_dir() if isinstance(girtarget, build.SharedLibrary): # need to put our output directory first as we need to use the # generated libraries instead of any possibly installed system/prefix # ones. 
ret += ["-L{}/{}".format(build_root, os.path.dirname(libpath))] libname = girtarget.get_basename() else: libname = os.path.join(f"{build_root}/{libpath}") ret += ['--library', libname] # Needed for the following binutils bug: # https://github.com/mesonbuild/meson/issues/1911 # However, g-ir-scanner does not understand -Wl,-rpath # so we need to use -L instead for d in state.backend.determine_rpath_dirs(girtarget): d = os.path.join(state.environment.get_build_dir(), d) ret.append('-L' + d) return ret def _get_girtargets_langs_compilers(self, girtargets: T.Sequence[build.BuildTarget]) -> T.List[T.Tuple[str, 'Compiler']]: ret: T.List[T.Tuple[str, 'Compiler']] = [] for girtarget in girtargets: for lang, compiler in girtarget.compilers.items(): # XXX: Can you use g-i with any other language? if lang in ('c', 'cpp', 'objc', 'objcpp', 'd'): ret.append((lang, compiler)) break return ret def _get_gir_targets_deps(self, girtargets: T.Sequence[build.BuildTarget] ) -> T.List[T.Union[build.Target, Dependency]]: ret: T.List[T.Union[build.Target, Dependency]] = [] for girtarget in girtargets: ret += girtarget.get_all_link_deps() ret += girtarget.get_external_deps() return ret def _get_gir_targets_inc_dirs(self, girtargets: T.List[build.BuildTarget]) -> T.List[build.IncludeDirs]: ret: T.List[build.IncludeDirs] = [] for girtarget in girtargets: ret += girtarget.get_include_dirs() return ret def _get_langs_compilers_flags(self, state: 'ModuleState', langs_compilers: T.List[T.Tuple[str, 'Compiler']] ) -> T.Tuple[T.List[str], T.List[str], T.List[str]]: cflags: T.List[str] = [] internal_ldflags: T.List[str] = [] external_ldflags: T.List[str] = [] for lang, compiler in langs_compilers: if state.global_args.get(lang): cflags += state.global_args[lang] if state.project_args.get(lang): cflags += state.project_args[lang] if mesonlib.OptionKey('b_sanitize') in compiler.base_options: sanitize = state.environment.coredata.options[mesonlib.OptionKey('b_sanitize')].value cflags += 
    def _make_gir_filelist(self, state: 'ModuleState', srcdir: str, ns: str,
                           nsversion: str, girtargets: T.List[build.BuildTarget],
                           libsources: T.Sequence[T.Union[
                               str, mesonlib.File, build.GeneratedList,
                               build.CustomTarget, build.CustomTargetIndex]]
                           ) -> str:
        """Write the g-ir-scanner file list (one source path per line).

        The list is written to ``<private dir>/<ns>_<nsversion>_gir_filelist``
        and the path of that file is returned (it is later passed to the
        scanner via ``--filelist=``).
        """
        gir_filelist_dir = state.backend.get_target_private_dir_abs(girtargets[0])
        if not os.path.isdir(gir_filelist_dir):
            # Parent (the build dir) is assumed to exist, so a plain mkdir is enough.
            os.mkdir(gir_filelist_dir)
        gir_filelist_filename = os.path.join(gir_filelist_dir, f'{ns}_{nsversion}_gir_filelist')

        with open(gir_filelist_filename, 'w', encoding='utf-8') as gir_filelist:
            for s in libsources:
                # Each branch resolves a different source kind to an on-disk path.
                if isinstance(s, (build.CustomTarget, build.CustomTargetIndex)):
                    # Custom-target outputs live under the target's build dir.
                    for custom_output in s.get_outputs():
                        gir_filelist.write(os.path.join(state.environment.get_build_dir(),
                                                        state.backend.get_target_dir(s),
                                                        custom_output) + '\n')
                elif isinstance(s, mesonlib.File):
                    gir_filelist.write(s.rel_to_builddir(state.build_to_src) + '\n')
                elif isinstance(s, build.GeneratedList):
                    # Generated sources are written relative to srcdir,
                    # matching how the scanner is invoked.
                    for gen_src in s.get_outputs():
                        gir_filelist.write(os.path.join(srcdir, gen_src) + '\n')
                else:
                    # Plain string: a path relative to the current source dir.
                    gir_filelist.write(os.path.join(srcdir, s) + '\n')

        return gir_filelist_filename
girfile, 'command': scan_command, 'depends': depends, 'install': kwargs['install'], 'install_dir': kwargs['install_dir_gir'] or os.path.join(state.environment.get_datadir(), 'gir-1.0'), 'install_tag': 'devel', 'build_by_default': kwargs['build_by_default'], } return GirTarget(girfile, state.subdir, state.subproject, scankwargs) def _make_typelib_target(self, state: 'ModuleState', typelib_output: str, typelib_cmd: T.List[str], generated_files: T.Sequence[T.Union[str, mesonlib.File, build.CustomTarget, build.CustomTargetIndex, build.GeneratedList]], kwargs: T.Dict[str, T.Any]) -> TypelibTarget: typelib_kwargs = { 'input': generated_files, 'output': [typelib_output], 'command': typelib_cmd, 'install': kwargs['install'], 'install_dir': kwargs['install_dir_typelib'] or os.path.join(state.environment.get_libdir(), 'girepository-1.0'), 'install_tag': 'typelib', 'build_by_default': kwargs['build_by_default'], } return TypelibTarget(typelib_output, state.subdir, state.subproject, typelib_kwargs) # May mutate depends def _gather_typelib_includes_and_update_depends(self, state: 'ModuleState', deps: T.List[Dependency], depends: T.List[build.Target]) -> T.List[str]: # Need to recursively add deps on GirTarget sources from our # dependencies and also find the include directories needed for the # typelib generation custom target below. typelib_includes: T.List[str] = [] for dep in deps: # Add a dependency on each GirTarget listed in dependencies and add # the directory where it will be generated to the typelib includes if isinstance(dep, InternalDependency): for source in dep.sources: if isinstance(source, GirTarget) and source not in depends: depends.append(source) subdir = os.path.join(state.environment.get_build_dir(), source.get_subdir()) if subdir not in typelib_includes: typelib_includes.append(subdir) # Do the same, but for dependencies of dependencies. These are # stored in the list of generated sources for each link dep (from # girtarget.get_all_link_deps() above). 
# FIXME: Store this in the original form from declare_dependency() # so it can be used here directly. elif isinstance(dep, build.SharedLibrary): for source in dep.generated: if isinstance(source, GirTarget): subdir = os.path.join(state.environment.get_build_dir(), source.get_subdir()) if subdir not in typelib_includes: typelib_includes.append(subdir) if isinstance(dep, Dependency): girdir = dep.get_variable(pkgconfig='girdir', internal='girdir', default_value='') assert isinstance(girdir, str), 'for mypy' if girdir and girdir not in typelib_includes: typelib_includes.append(girdir) return typelib_includes def _get_external_args_for_langs(self, state: 'ModuleState', langs: T.Sequence[str]) -> T.List[str]: ret: T.List[str] = [] for lang in langs: ret += mesonlib.listify(state.environment.coredata.get_external_args(MachineChoice.HOST, lang)) return ret @staticmethod def _get_scanner_cflags(cflags: T.Iterable[str]) -> T.Iterable[str]: 'g-ir-scanner only accepts -I/-D/-U; must ignore all other flags' for f in cflags: # _FORTIFY_SOURCE depends on / works together with -O, on the other hand this # just invokes the preprocessor anyway if f.startswith(('-D', '-U', '-I')) and not f.startswith('-D_FORTIFY_SOURCE'): yield f @staticmethod def _get_scanner_ldflags(ldflags: T.Iterable[str]) -> T.Iterable[str]: 'g-ir-scanner only accepts -L/-l; must ignore -F and other linker flags' for f in ldflags: if f.startswith(('-L', '-l', '--extra-library')): yield f @typed_pos_args('gnome.generate_gir', varargs=(build.Executable, build.SharedLibrary, build.StaticLibrary), min_varargs=1) @typed_kwargs( 'gnome.generate_gir', INSTALL_KW, _BUILD_BY_DEFAULT.evolve(since='0.40.0'), _EXTRA_ARGS_KW, KwargInfo('dependencies', ContainerTypeInfo(list, Dependency), default=[], listify=True), KwargInfo('export_packages', ContainerTypeInfo(list, str), default=[], listify=True), KwargInfo('fatal_warnings', bool, default=False, since='0.55.0'), KwargInfo('header', ContainerTypeInfo(list, str), default=[], 
listify=True), KwargInfo('identifier_prefix', ContainerTypeInfo(list, str), default=[], listify=True), KwargInfo('include_directories', ContainerTypeInfo(list, (str, build.IncludeDirs)), default=[], listify=True), KwargInfo('includes', ContainerTypeInfo(list, (str, GirTarget)), default=[], listify=True), KwargInfo('install_dir_gir', (str, NoneType)), KwargInfo('install_dir_typelib', (str, NoneType)), KwargInfo('link_with', ContainerTypeInfo(list, (build.SharedLibrary, build.StaticLibrary)), default=[], listify=True), KwargInfo('namespace', str, required=True), KwargInfo('nsversion', str, required=True), KwargInfo('sources', ContainerTypeInfo(list, (str, mesonlib.File, build.GeneratedList, build.CustomTarget, build.CustomTargetIndex)), default=[], listify=True), KwargInfo('symbol_prefix', ContainerTypeInfo(list, str), default=[], listify=True), ) def generate_gir(self, state: 'ModuleState', args: T.Tuple[T.List[T.Union[build.Executable, build.SharedLibrary, build.StaticLibrary]]], kwargs: 'GenerateGir') -> ModuleReturnValue: girtargets = [self._unwrap_gir_target(arg, state) for arg in args[0]] if len(girtargets) > 1 and any([isinstance(el, build.Executable) for el in girtargets]): raise MesonException('generate_gir only accepts a single argument when one of the arguments is an executable') gir_dep, giscanner, gicompiler = self._get_gir_dep(state) ns = kwargs['namespace'] nsversion = kwargs['nsversion'] libsources = kwargs['sources'] girfile = f'{ns}-{nsversion}.gir' srcdir = os.path.join(state.environment.get_source_dir(), state.subdir) builddir = os.path.join(state.environment.get_build_dir(), state.subdir) depends: T.List[T.Union['FileOrString', build.GeneratedTypes, build.Executable, build.SharedLibrary, build.StaticLibrary]] = [] depends.extend(gir_dep.sources) depends.extend(girtargets) langs_compilers = self._get_girtargets_langs_compilers(girtargets) cflags, internal_ldflags, external_ldflags = self._get_langs_compilers_flags(state, langs_compilers) deps = 
self._get_gir_targets_deps(girtargets) deps += kwargs['dependencies'] deps += [gir_dep] typelib_includes = self._gather_typelib_includes_and_update_depends(state, deps, depends) # ldflags will be misinterpreted by gir scanner (showing # spurious dependencies) but building GStreamer fails if they # are not used here. dep_cflags, dep_internal_ldflags, dep_external_ldflags, _, gi_includes = \ self._get_dependencies_flags(deps, state, depends, use_gir_args=True) scan_cflags = [] scan_cflags += list(self._get_scanner_cflags(cflags)) scan_cflags += list(self._get_scanner_cflags(dep_cflags)) scan_cflags += list(self._get_scanner_cflags(self._get_external_args_for_langs(state, [lc[0] for lc in langs_compilers]))) scan_internal_ldflags = [] scan_internal_ldflags += list(self._get_scanner_ldflags(internal_ldflags)) scan_internal_ldflags += list(self._get_scanner_ldflags(dep_internal_ldflags)) scan_external_ldflags = [] scan_external_ldflags += list(self._get_scanner_ldflags(external_ldflags)) scan_external_ldflags += list(self._get_scanner_ldflags(dep_external_ldflags)) girtargets_inc_dirs = self._get_gir_targets_inc_dirs(girtargets) inc_dirs = kwargs['include_directories'] gir_inc_dirs: T.List[str] = [] scan_command: T.List[T.Union[str, build.Executable, 'ExternalProgram', 'OverrideProgram']] = [giscanner] scan_command += ['--no-libtool'] scan_command += ['--namespace=' + ns, '--nsversion=' + nsversion] scan_command += ['--warn-all'] scan_command += ['--output', '@OUTPUT@'] scan_command += [f'--c-include={h}' for h in kwargs['header']] scan_command += kwargs['extra_args'] scan_command += ['-I' + srcdir, '-I' + builddir] scan_command += state.get_include_args(girtargets_inc_dirs) scan_command += ['--filelist=' + self._make_gir_filelist(state, srcdir, ns, nsversion, girtargets, libsources)] scan_command += mesonlib.listify([self._get_link_args(state, l, depends, use_gir_args=True) for l in kwargs['link_with']]) _cmd, _ginc, _deps = self._scan_include(state, 
kwargs['includes']) scan_command.extend(_cmd) gir_inc_dirs.extend(_ginc) depends.extend(_deps) scan_command += [f'--symbol-prefix={p}' for p in kwargs['symbol_prefix']] scan_command += [f'--identifier-prefix={p}' for p in kwargs['identifier_prefix']] scan_command += [f'--pkg-export={p}' for p in kwargs['export_packages']] scan_command += ['--cflags-begin'] scan_command += scan_cflags scan_command += ['--cflags-end'] scan_command += state.get_include_args(inc_dirs) scan_command += state.get_include_args(list(gi_includes) + gir_inc_dirs + inc_dirs, prefix='--add-include-path=') scan_command += list(scan_internal_ldflags) scan_command += self._scan_gir_targets(state, girtargets) scan_command += self._scan_langs(state, [lc[0] for lc in langs_compilers]) scan_command += list(scan_external_ldflags) if self._gir_has_option('--sources-top-dirs'): scan_command += ['--sources-top-dirs', os.path.join(state.environment.get_source_dir(), self.interpreter.subproject_dir, state.subproject)] scan_command += ['--sources-top-dirs', os.path.join(state.environment.get_build_dir(), self.interpreter.subproject_dir, state.subproject)] if '--warn-error' in scan_command: FeatureDeprecated.single_use('gnome.generate_gir argument --warn-error', '0.55.0', state.subproject, 'Use "fatal_warnings" keyword argument', state.current_node) if kwargs['fatal_warnings']: scan_command.append('--warn-error') generated_files = [f for f in libsources if isinstance(f, (GeneratedList, CustomTarget, CustomTargetIndex))] scan_target = self._make_gir_target(state, girfile, scan_command, generated_files, depends, kwargs) typelib_output = f'{ns}-{nsversion}.typelib' typelib_cmd = [gicompiler, scan_target, '--output', '@OUTPUT@'] typelib_cmd += state.get_include_args(gir_inc_dirs, prefix='--includedir=') for incdir in typelib_includes: typelib_cmd += ["--includedir=" + incdir] typelib_target = self._make_typelib_target(state, typelib_output, typelib_cmd, generated_files, kwargs) 
    @noPosargs
    @typed_kwargs('gnome.compile_schemas', _BUILD_BY_DEFAULT.evolve(since='0.40.0'), DEPEND_FILES_KW)
    def compile_schemas(self, state: 'ModuleState', args: T.List['TYPE_var'], kwargs: 'CompileSchemas') -> ModuleReturnValue:
        """Create a custom target running glib-compile-schemas on this subdir.

        The compiled ``gschemas.compiled`` is produced in the current build
        subdir, and GSETTINGS_SCHEMA_DIR is prepended in the dev environment
        so uninstalled schemas are found.
        """
        srcdir = os.path.join(state.build_to_src, state.subdir)
        outdir = state.subdir

        cmd = [state.find_program('glib-compile-schemas'), '--targetdir', outdir, srcdir]
        ct_kwargs = T.cast(T.Dict[str, T.Any], kwargs.copy())
        ct_kwargs['command'] = cmd
        ct_kwargs['input'] = []
        ct_kwargs['output'] = 'gschemas.compiled'
        # Derive a unique target name from the subdir ('/' is not a valid
        # target-name character, hence the replacement).
        if state.subdir == '':
            targetname = 'gsettings-compile'
        else:
            targetname = 'gsettings-compile-' + state.subdir.replace('/', '_')
        target_g = build.CustomTarget(targetname, state.subdir, state.subproject, ct_kwargs)
        self._devenv_prepend('GSETTINGS_SCHEMA_DIR', os.path.join(state.environment.get_build_dir(), state.subdir))
        return ModuleReturnValue(target_g, [target_g])
sources') elif args[1]: mlog.warning('"gnome.yelp" ignores positional sources arguments when the "sources" keyword argument is set') source_str = '@@'.join(sources) langs = kwargs['languages'] script = state.environment.get_build_command() inscript_args = ['--internal', 'yelphelper', 'install', '--subdir=' + state.subdir, '--id=' + project_id, '--installdir=' + os.path.join(state.environment.get_datadir(), 'help'), '--sources=' + source_str] if kwargs['symlink_media']: inscript_args.append('--symlinks=true') if kwargs['media']: inscript_args.append('--media=' + '@@'.join(kwargs['media'])) if langs: inscript_args.append('--langs=' + '@@'.join(langs)) inscript = state.backend.get_executable_serialisation(script + inscript_args) potargs = state.environment.get_build_command() + [ '--internal', 'yelphelper', 'pot', '--subdir=' + state.subdir, '--id=' + project_id, '--sources=' + source_str, ] pottarget = build.RunTarget('help-' + project_id + '-pot', potargs, [], state.subdir, state.subproject) poargs = state.environment.get_build_command() + [ '--internal', 'yelphelper', 'update-po', '--subdir=' + state.subdir, '--id=' + project_id, '--sources=' + source_str, '--langs=' + '@@'.join(langs), ] potarget = build.RunTarget('help-' + project_id + '-update-po', poargs, [], state.subdir, state.subproject) rv: T.List[T.Union[build.ExecutableSerialisation, build.RunTarget]] = [inscript, pottarget, potarget] return ModuleReturnValue(None, rv) @typed_pos_args('gnome.gtkdoc', str) @typed_kwargs( 'gnome.gtkdoc', KwargInfo('c_args', ContainerTypeInfo(list, str), since='0.48.0', default=[], listify=True), KwargInfo('check', bool, default=False, since='0.52.0'), KwargInfo('content_files', ContainerTypeInfo(list, (str, mesonlib.File, build.GeneratedList, build.CustomTarget, build.CustomTargetIndex)), default=[], listify=True), KwargInfo( 'dependencies', ContainerTypeInfo(list, (Dependency, build.SharedLibrary, build.StaticLibrary)), listify=True, default=[]), 
KwargInfo('expand_content_files', ContainerTypeInfo(list, str), default=[], listify=True), KwargInfo('fixxref_args', ContainerTypeInfo(list, str), default=[], listify=True), KwargInfo('gobject_typesfile', ContainerTypeInfo(list, str), default=[], listify=True), KwargInfo('html_args', ContainerTypeInfo(list, str), default=[], listify=True), KwargInfo('html_assets', ContainerTypeInfo(list, str), default=[], listify=True), KwargInfo('ignore_headers', ContainerTypeInfo(list, str), default=[], listify=True), KwargInfo( 'include_directories', ContainerTypeInfo(list, (str, build.IncludeDirs)), listify=True, default=[]), KwargInfo('install', bool, default=True), KwargInfo('install_dir', ContainerTypeInfo(list, str), default=[], listify=True), KwargInfo('main_sgml', (str, NoneType)), KwargInfo('main_xml', (str, NoneType)), KwargInfo('mkdb_args', ContainerTypeInfo(list, str), default=[], listify=True), KwargInfo( 'mode', str, default='auto', since='0.37.0', validator=in_set_validator({'xml', 'sgml', 'none', 'auto'})), KwargInfo('module_version', str, default='', since='0.48.0'), KwargInfo('namespace', str, default='', since='0.37.0'), KwargInfo('scan_args', ContainerTypeInfo(list, str), default=[], listify=True), KwargInfo('scanobjs_args', ContainerTypeInfo(list, str), default=[], listify=True), KwargInfo('src_dir', ContainerTypeInfo(list, (str, build.IncludeDirs)), listify=True, required=True), ) def gtkdoc(self, state: 'ModuleState', args: T.Tuple[str], kwargs: 'GtkDoc') -> ModuleReturnValue: modulename = args[0] main_file = kwargs['main_sgml'] main_xml = kwargs['main_xml'] if main_xml is not None: if main_file is not None: raise InvalidArguments('gnome.gtkdoc: main_xml and main_xgml are exclusive arguments') main_file = main_xml moduleversion = kwargs['module_version'] targetname = modulename + ('-' + moduleversion if moduleversion else '') + '-doc' command = state.environment.get_build_command() namespace = kwargs['namespace'] def abs_filenames(files: 
T.Iterable['FileOrString']) -> T.Iterator[str]: for f in files: if isinstance(f, mesonlib.File): yield f.absolute_path(state.environment.get_source_dir(), state.environment.get_build_dir()) else: yield os.path.join(state.environment.get_source_dir(), state.subdir, f) src_dirs = kwargs['src_dir'] header_dirs: T.List[str] = [] for src_dir in src_dirs: if isinstance(src_dir, build.IncludeDirs): header_dirs.extend(src_dir.to_string_list(state.environment.get_source_dir(), state.environment.get_build_dir())) else: header_dirs.append(src_dir) t_args = ['--internal', 'gtkdoc', '--sourcedir=' + state.environment.get_source_dir(), '--builddir=' + state.environment.get_build_dir(), '--subdir=' + state.subdir, '--headerdirs=' + '@@'.join(header_dirs), '--mainfile=' + main_file, '--modulename=' + modulename, '--moduleversion=' + moduleversion, '--mode=' + kwargs['mode']] for tool in ['scan', 'scangobj', 'mkdb', 'mkhtml', 'fixxref']: program_name = 'gtkdoc-' + tool program = state.find_program(program_name) path = program.get_path() t_args.append(f'--{program_name}={path}') if namespace: t_args.append('--namespace=' + namespace) t_args.append(f'--htmlargs={'@@'.join(kwargs['html_args'])}') t_args.append(f'--scanargs={'@@'.join(kwargs['scan_args'])}') t_args.append(f'--scanobjsargs={'@@'.join(kwargs['scanobjs_args'])}') t_args.append(f'--gobjects-types-file={'@@'.join(abs_filenames(kwargs['gobject_typesfile']))}') t_args.append(f'--fixxrefargs={'@@'.join(kwargs['fixxref_args'])}') t_args.append(f'--mkdbargs={'@@'.join(kwargs['mkdb_args'])}') t_args.append(f'--html-assets={'@@'.join(abs_filenames(kwargs['html_assets']))}') depends: T.List['build.GeneratedTypes'] = [] content_files = [] for s in kwargs['content_files']: if isinstance(s, (build.CustomTarget, build.CustomTargetIndex)): depends.append(s) for o in s.get_outputs(): content_files.append(os.path.join(state.environment.get_build_dir(), state.backend.get_target_dir(s), o)) elif isinstance(s, mesonlib.File): 
content_files.append(s.absolute_path(state.environment.get_source_dir(), state.environment.get_build_dir())) elif isinstance(s, build.GeneratedList): depends.append(s) for gen_src in s.get_outputs(): content_files.append(os.path.join(state.environment.get_source_dir(), state.subdir, gen_src)) else: content_files.append(os.path.join(state.environment.get_source_dir(), state.subdir, s)) t_args += ['--content-files=' + '@@'.join(content_files)] t_args.append(f'--expand-content-files={'@@'.join(abs_filenames(kwargs['expand_content_files']))}') t_args.append(f'--ignore-headers={'@@'.join(kwargs['ignore_headers'])}') t_args.append(f'--installdir={'@@'.join(kwargs['install_dir'])}') t_args += self._get_build_args(kwargs['c_args'], kwargs['include_directories'], kwargs['dependencies'], state, depends) custom_kwargs = {'output': modulename + '-decl.txt', 'command': command + t_args, 'depends': depends, 'build_always_stale': True, } custom_target = build.CustomTarget(targetname, state.subdir, state.subproject, custom_kwargs) alias_target = build.AliasTarget(targetname, [custom_target], state.subdir, state.subproject) if kwargs['check']: check_cmd = state.find_program('gtkdoc-check') check_env = ['DOC_MODULE=' + modulename, 'DOC_MAIN_SGML_FILE=' + main_file] check_args = (targetname + '-check', check_cmd) check_workdir = os.path.join(state.environment.get_build_dir(), state.subdir) state.test(check_args, env=check_env, workdir=check_workdir, depends=[custom_target]) res: T.List[T.Union[build.Target, build.ExecutableSerialisation]] = [custom_target, alias_target] if kwargs['install']: res.append(state.backend.get_executable_serialisation(command + t_args, tag='doc')) return ModuleReturnValue(custom_target, res) def _get_build_args(self, c_args: T.List[str], inc_dirs: T.List[T.Union[str, build.IncludeDirs]], deps: T.List[T.Union[Dependency, build.SharedLibrary, build.StaticLibrary]], state: 'ModuleState', depends: T.List[build.BuildTarget]) -> T.List[str]: args: T.List[str] = 
[] cflags = c_args.copy() deps_cflags, internal_ldflags, external_ldflags, *_ = \ self._get_dependencies_flags(deps, state, depends, include_rpath=True) cflags.extend(deps_cflags) cflags.extend(state.get_include_args(inc_dirs)) ldflags: T.List[str] = [] ldflags.extend(internal_ldflags) ldflags.extend(external_ldflags) cflags.extend(state.environment.coredata.get_external_args(MachineChoice.HOST, 'c')) ldflags.extend(state.environment.coredata.get_external_link_args(MachineChoice.HOST, 'c')) compiler = state.environment.coredata.compilers[MachineChoice.HOST]['c'] compiler_flags = self._get_langs_compilers_flags(state, [('c', compiler)]) cflags.extend(compiler_flags[0]) ldflags.extend(compiler_flags[1]) ldflags.extend(compiler_flags[2]) if compiler: args += ['--cc=%s' % join_args(compiler.get_exelist())] args += ['--ld=%s' % join_args(compiler.get_linker_exelist())] if cflags: args += ['--cflags=%s' % join_args(cflags)] if ldflags: args += ['--ldflags=%s' % join_args(ldflags)] return args @noKwargs @typed_pos_args('gnome.gtkdoc_html_dir', str) def gtkdoc_html_dir(self, state: 'ModuleState', args: T.Tuple[str], kwargs: 'TYPE_kwargs') -> str: return os.path.join('share/gtk-doc/html', args[0]) @typed_pos_args('gnome.gdbus_codegen', str, optargs=[str]) @typed_kwargs( 'gnome.gdbus_codegen', _BUILD_BY_DEFAULT.evolve(since='0.40.0'), KwargInfo('sources', ContainerTypeInfo(list, (str, mesonlib.File)), since='0.46.0', default=[], listify=True), KwargInfo('extra_args', ContainerTypeInfo(list, str), since='0.47.0', default=[], listify=True), KwargInfo('interface_prefix', (str, NoneType)), KwargInfo('namespace', (str, NoneType)), KwargInfo('object_manager', bool, default=False), KwargInfo( 'annotations', ContainerTypeInfo(list, str), listify=True, default=[], validator=lambda x: 'must be made up of 3 strings for ELEMENT, KEY, and VALUE' if len(x) != 3 else None ), KwargInfo('install_header', bool, default=False, since='0.46.0'), KwargInfo('install_dir', (str, NoneType), 
since='0.46.0'), KwargInfo('docbook', (str, NoneType)), KwargInfo( 'autocleanup', str, default='default', since='0.47.0', validator=in_set_validator({'all', 'none', 'objects'})), ) def gdbus_codegen(self, state: 'ModuleState', args: T.Tuple[str, T.Optional[str]], kwargs: 'GdbusCodegen') -> ModuleReturnValue: namebase = args[0] xml_files: T.List['FileOrString'] = [args[1]] if args[1] else [] cmd: T.List[T.Union['ExternalProgram', str]] = [state.find_program('gdbus-codegen')] cmd.extend(kwargs['extra_args']) # Autocleanup supported? glib_version = self._get_native_glib_version(state) if not mesonlib.version_compare(glib_version, '>= 2.49.1'): # Warn if requested, silently disable if not if kwargs['autocleanup'] != 'default': mlog.warning(f'Glib version ({glib_version}) is too old to support the \'autocleanup\' ' 'kwarg, need 2.49.1 or newer') else: # Handle legacy glib versions that don't have autocleanup ac = kwargs['autocleanup'] if ac == 'default': ac = 'all' cmd.extend(['--c-generate-autocleanup', ac]) if kwargs['interface_prefix'] is not None: cmd.extend(['--interface-prefix', kwargs['interface_prefix']]) if kwargs['namespace'] is not None: cmd.extend(['--c-namespace', kwargs['namespace']]) if kwargs['object_manager']: cmd.extend(['--c-generate-object-manager']) xml_files.extend(kwargs['sources']) build_by_default = kwargs['build_by_default'] # Annotations are a bit ugly in that they are a list of lists of strings... 
if kwargs['annotations']: cmd.append('--annotate') cmd.extend(kwargs['annotations']) targets = [] install_header = kwargs['install_header'] install_dir = kwargs['install_dir'] or state.environment.coredata.get_option(mesonlib.OptionKey('includedir')) assert isinstance(install_dir, str), 'for mypy' output = namebase + '.c' # Added in https://gitlab.gnome.org/GNOME/glib/commit/e4d68c7b3e8b01ab1a4231bf6da21d045cb5a816 (2.55.2) # Fixed in https://gitlab.gnome.org/GNOME/glib/commit/cd1f82d8fc741a2203582c12cc21b4dacf7e1872 (2.56.2) if mesonlib.version_compare(glib_version, '>= 2.56.2'): custom_kwargs = {'input': xml_files, 'output': output, 'command': cmd + ['--body', '--output', '@OUTPUT@', '@INPUT@'], 'build_by_default': build_by_default } else: if kwargs['docbook'] is not None: docbook = kwargs['docbook'] cmd += ['--generate-docbook', docbook] # https://git.gnome.org/browse/glib/commit/?id=ee09bb704fe9ccb24d92dd86696a0e6bb8f0dc1a if mesonlib.version_compare(glib_version, '>= 2.51.3'): cmd += ['--output-directory', '@OUTDIR@', '--generate-c-code', namebase, '@INPUT@'] else: self._print_gdbus_warning() cmd += ['--generate-c-code', '@OUTDIR@/' + namebase, '@INPUT@'] custom_kwargs = {'input': xml_files, 'output': output, 'command': cmd, 'build_by_default': build_by_default } cfile_custom_target = build.CustomTarget(output, state.subdir, state.subproject, custom_kwargs) targets.append(cfile_custom_target) output = namebase + '.h' if mesonlib.version_compare(glib_version, '>= 2.56.2'): custom_kwargs = {'input': xml_files, 'output': output, 'command': cmd + ['--header', '--output', '@OUTPUT@', '@INPUT@'], 'build_by_default': build_by_default, 'install': install_header, 'install_dir': install_dir } else: custom_kwargs = {'input': xml_files, 'output': output, 'command': cmd, 'build_by_default': build_by_default, 'install': install_header, 'install_dir': install_dir, 'depends': cfile_custom_target } hfile_custom_target = build.CustomTarget(output, state.subdir, 
state.subproject, custom_kwargs) targets.append(hfile_custom_target) if kwargs['docbook'] is not None: docbook = kwargs['docbook'] if not isinstance(docbook, str): raise MesonException('docbook value must be a string.') docbook_cmd = cmd + ['--output-directory', '@OUTDIR@', '--generate-docbook', docbook, '@INPUT@'] # The docbook output is always ${docbook}-${name_of_xml_file} output = namebase + '-docbook' outputs = [] for f in xml_files: outputs.append('{}-{}'.format(docbook, os.path.basename(str(f)))) if mesonlib.version_compare(glib_version, '>= 2.56.2'): custom_kwargs = {'input': xml_files, 'output': outputs, 'command': docbook_cmd, 'build_by_default': build_by_default } else: custom_kwargs = {'input': xml_files, 'output': outputs, 'command': cmd, 'build_by_default': build_by_default, 'depends': cfile_custom_target } docbook_custom_target = build.CustomTarget(output, state.subdir, state.subproject, custom_kwargs) targets.append(docbook_custom_target) return ModuleReturnValue(targets, targets) @permittedKwargs({'sources', 'c_template', 'h_template', 'install_header', 'install_dir', 'comments', 'identifier_prefix', 'symbol_prefix', 'eprod', 'vprod', 'fhead', 'fprod', 'ftail', 'vhead', 'vtail', 'depends'}) @typed_pos_args('gnome.mkenums', str) def mkenums(self, state: 'ModuleState', args: T.Tuple[str], kwargs) -> ModuleReturnValue: basename = args[0] if 'sources' not in kwargs: raise MesonException('Missing keyword argument "sources".') sources = kwargs.pop('sources') if isinstance(sources, str): sources = [sources] elif not isinstance(sources, list): raise MesonException( 'Sources keyword argument must be a string or array.') cmd = [] known_kwargs = ['comments', 'eprod', 'fhead', 'fprod', 'ftail', 'identifier_prefix', 'symbol_prefix', 'template', 'vhead', 'vprod', 'vtail'] known_custom_target_kwargs = ['install_dir', 'build_always', 'depends', 'depend_files'] c_template = h_template = None install_header = False for arg, value in kwargs.items(): if arg == 
'sources': raise AssertionError("sources should've already been handled") elif arg == 'c_template': c_template = value if isinstance(c_template, mesonlib.File): c_template = c_template.absolute_path(state.environment.source_dir, state.environment.build_dir) if 'template' in kwargs: raise MesonException('Mkenums does not accept both ' 'c_template and template keyword ' 'arguments at the same time.') elif arg == 'h_template': h_template = value if isinstance(h_template, mesonlib.File): h_template = h_template.absolute_path(state.environment.source_dir, state.environment.build_dir) if 'template' in kwargs: raise MesonException('Mkenums does not accept both ' 'h_template and template keyword ' 'arguments at the same time.') elif arg == 'install_header': install_header = value elif arg in known_kwargs: cmd += ['--' + arg.replace('_', '-'), value] elif arg not in known_custom_target_kwargs: raise MesonException( f'Mkenums does not take a {arg} keyword argument.') cmd = [state.find_program(['glib-mkenums', 'mkenums'])] + cmd custom_kwargs = {} for arg in known_custom_target_kwargs: if arg in kwargs: custom_kwargs[arg] = kwargs[arg] targets = [] if h_template is not None: h_output = os.path.basename(os.path.splitext(h_template)[0]) # We always set template as the first element in the source array # so --template consumes it. h_cmd = cmd + ['--template', '@INPUT@'] h_sources = [h_template] + sources # Copy so we don't mutate the arguments for the c_template h_kwargs = custom_kwargs.copy() h_kwargs['install'] = install_header if 'install_dir' not in h_kwargs: h_kwargs['install_dir'] = \ state.environment.coredata.get_option(mesonlib.OptionKey('includedir')) h_target = self._make_mkenum_custom_target(state, h_sources, h_output, h_cmd, h_kwargs) targets.append(h_target) if c_template is not None: c_output = os.path.basename(os.path.splitext(c_template)[0]) # We always set template as the first element in the source array # so --template consumes it. 
c_cmd = cmd + ['--template', '@INPUT@'] c_sources = [c_template] + sources c_kwargs = custom_kwargs.copy() # Never install the C file. Complain on bug tracker if you need it. c_kwargs['install'] = False c_kwargs['install_dir'] = [] if h_template is not None: if 'depends' in custom_kwargs: c_kwargs['depends'] += [h_target] else: c_kwargs['depends'] = h_target c_target = self._make_mkenum_custom_target(state, c_sources, c_output, c_cmd, c_kwargs) targets.insert(0, c_target) if c_template is None and h_template is None: generic_cmd = cmd + ['@INPUT@'] custom_kwargs['install'] = install_header if 'install_dir' not in custom_kwargs: custom_kwargs['install_dir'] = \ state.environment.coredata.get_option(mesonlib.OptionKey('includedir')) target = self._make_mkenum_custom_target(state, sources, basename, generic_cmd, custom_kwargs) return ModuleReturnValue(target, [target]) elif len(targets) == 1: return ModuleReturnValue(targets[0], [targets[0]]) else: return ModuleReturnValue(targets, targets) @FeatureNew('gnome.mkenums_simple', '0.42.0') @typed_pos_args('gnome.mkenums_simple', str) def mkenums_simple(self, state: 'ModuleState', args: T.Tuple[str], kwargs) -> ModuleReturnValue: hdr_filename = f'{args[0]}.h' body_filename = f'{args[0]}.c' # not really needed, just for sanity checking forbidden_kwargs = ['c_template', 'h_template', 'eprod', 'fhead', 'fprod', 'ftail', 'vhead', 'vtail', 'comments'] for arg in forbidden_kwargs: if arg in kwargs: raise MesonException(f'mkenums_simple() does not take a {arg} keyword argument') # kwargs to pass as-is from mkenums_simple() to mkenums() shared_kwargs = ['sources', 'install_header', 'install_dir', 'identifier_prefix', 'symbol_prefix'] mkenums_kwargs = {} for arg in shared_kwargs: if arg in kwargs: mkenums_kwargs[arg] = kwargs[arg] # .c file generation c_file_kwargs = copy.deepcopy(mkenums_kwargs) if 'sources' not in kwargs: raise MesonException('Missing keyword argument "sources".') sources = kwargs['sources'] if 
isinstance(sources, str): sources = [sources] elif not isinstance(sources, list): raise MesonException( 'Sources keyword argument must be a string or array.') # The `install_header` argument will be used by mkenums() when # not using template files, so we need to forcibly unset it # when generating the C source file, otherwise we will end up # installing it c_file_kwargs['install_header'] = False header_prefix = kwargs.get('header_prefix', '') decl_decorator = kwargs.get('decorator', '') func_prefix = kwargs.get('function_prefix', '') body_prefix = kwargs.get('body_prefix', '') # Maybe we should write our own template files into the build dir # instead, but that seems like much more work, nice as it would be. fhead = '' if body_prefix != '': fhead += '%s\n' % body_prefix fhead += '#include "%s"\n' % hdr_filename for hdr in sources: fhead += '#include "{}"\n'.format(os.path.basename(str(hdr))) fhead += textwrap.dedent( ''' #define C_ENUM(v) ((gint) v) #define C_FLAGS(v) ((guint) v) ''') c_file_kwargs['fhead'] = fhead c_file_kwargs['fprod'] = textwrap.dedent( ''' /* enumerations from "@basename@" */ ''') c_file_kwargs['vhead'] = textwrap.dedent( f''' GType {func_prefix}@enum_name@_get_type (void) {{ static gsize gtype_id = 0; static const G@Type@Value values[] = {{''') c_file_kwargs['vprod'] = ' { C_@TYPE@(@VALUENAME@), "@VALUENAME@", "@valuenick@" },' c_file_kwargs['vtail'] = textwrap.dedent( ''' { 0, NULL, NULL } }; if (g_once_init_enter (&gtype_id)) { GType new_type = g_@type@_register_static (g_intern_static_string ("@EnumName@"), values); g_once_init_leave (&gtype_id, new_type); } return (GType) gtype_id; }''') rv = self.mkenums(state, [body_filename], c_file_kwargs) c_file = rv.return_value # .h file generation h_file_kwargs = copy.deepcopy(mkenums_kwargs) h_file_kwargs['fhead'] = textwrap.dedent( f'''#pragma once #include <glib-object.h> {header_prefix} G_BEGIN_DECLS ''') h_file_kwargs['fprod'] = textwrap.dedent( ''' /* enumerations from "@basename@" */ ''') 
h_file_kwargs['vhead'] = textwrap.dedent( f''' {decl_decorator} GType {func_prefix}@enum_name@_get_type (void); #define @ENUMPREFIX@_TYPE_@ENUMSHORT@ ({func_prefix}@enum_name@_get_type())''') h_file_kwargs['ftail'] = textwrap.dedent( ''' G_END_DECLS''') rv = self.mkenums(state, [hdr_filename], h_file_kwargs) h_file = rv.return_value return ModuleReturnValue([c_file, h_file], [c_file, h_file]) @staticmethod def _make_mkenum_custom_target( state: 'ModuleState', sources: T.Sequence[T.Union[str, mesonlib.File, build.CustomTarget, build.CustomTargetIndex, build.GeneratedList]], output: str, cmd: T.List[str], kwargs: T.Dict[str, T.Any]) -> build.CustomTarget: custom_kwargs = { 'input': sources, 'output': [output], 'capture': True, 'command': cmd } custom_kwargs.update(kwargs) return build.CustomTarget(output, state.subdir, state.subproject, custom_kwargs, # https://github.com/mesonbuild/meson/issues/973 absolute_paths=True) @permittedKwargs({'sources', 'prefix', 'install_header', 'install_dir', 'stdinc', 'nostdinc', 'internal', 'skip_source', 'valist_marshallers', 'extra_args'}) @typed_pos_args('gnome.genmarshal', str) def genmarshal(self, state: 'ModuleState', args: T.Tuple[str], kwargs) -> ModuleReturnValue: output = args[0] if 'sources' not in kwargs: raise MesonException('Missing keyword argument "sources".') sources = kwargs.pop('sources') if isinstance(sources, str): sources = [sources] elif not isinstance(sources, list): raise MesonException( 'Sources keyword argument must be a string or array.') new_genmarshal = mesonlib.version_compare(self._get_native_glib_version(state), '>= 2.53.3') cmd = [state.find_program('glib-genmarshal')] known_kwargs = ['internal', 'nostdinc', 'skip_source', 'stdinc', 'valist_marshallers', 'extra_args'] known_custom_target_kwargs = ['build_always', 'depends', 'depend_files', 'install_dir', 'install_header'] for arg, value in kwargs.items(): if arg == 'prefix': cmd += ['--prefix', value] elif arg == 'extra_args': if new_genmarshal: cmd 
+= mesonlib.stringlistify(value) else: mlog.warning('The current version of GLib does not support extra arguments \n' 'for glib-genmarshal. You need at least GLib 2.53.3. See ', mlog.bold('https://github.com/mesonbuild/meson/pull/2049')) elif arg in known_kwargs and value: cmd += ['--' + arg.replace('_', '-')] elif arg not in known_custom_target_kwargs: raise MesonException(f'Genmarshal does not take a {arg} keyword argument.') install_header = kwargs.pop('install_header', False) install_dir = kwargs.pop('install_dir', []) custom_kwargs = { 'input': sources, } # https://github.com/GNOME/glib/commit/0fbc98097fac4d3e647684f344e508abae109fdf if mesonlib.version_compare(self._get_native_glib_version(state), '>= 2.51.0'): cmd += ['--output', '@OUTPUT@'] else: custom_kwargs['capture'] = True for arg in known_custom_target_kwargs: if arg in kwargs: custom_kwargs[arg] = kwargs[arg] header_file = output + '.h' custom_kwargs['command'] = cmd + ['--body', '@INPUT@'] if mesonlib.version_compare(self._get_native_glib_version(state), '>= 2.53.4'): # Silence any warnings about missing prototypes custom_kwargs['command'] += ['--include-header', header_file] custom_kwargs['output'] = output + '.c' body = build.CustomTarget(output + '_c', state.subdir, state.subproject, custom_kwargs) custom_kwargs['install'] = install_header custom_kwargs['install_dir'] = install_dir if new_genmarshal: cmd += ['--pragma-once'] custom_kwargs['command'] = cmd + ['--header', '@INPUT@'] custom_kwargs['output'] = header_file header = build.CustomTarget(output + '_h', state.subdir, state.subproject, custom_kwargs) rv = [body, header] return ModuleReturnValue(rv, rv) @staticmethod def _vapi_args_to_command(prefix: str, variable: str, kwargs: T.Dict[str, T.Any], accept_vapi: bool = False) -> T.List[str]: arg_list = mesonlib.extract_as_list(kwargs, variable) ret: T.List[str] = [] for arg in arg_list: if not isinstance(arg, str): types = 'strings' + ' or InternalDependencys' if accept_vapi else '' raise 
MesonException(f'All {variable} must be {types}') ret.append(prefix + arg) return ret def _extract_vapi_packages(self, state: 'ModuleState', kwargs: T.Dict[str, T.Any] ) -> T.Tuple[T.List[str], T.List[build.Target], T.List[str], T.List[str]]: ''' Packages are special because we need to: - Get a list of packages for the .deps file - Get a list of depends for any VapiTargets - Get package name from VapiTargets - Add include dirs for any VapiTargets ''' arg_list = kwargs.get('packages') if not arg_list: return [], [], [], [] arg_list = mesonlib.listify(arg_list) vapi_depends: T.List[build.Target] = [] vapi_packages: T.List[str] = [] vapi_includes: T.List[str] = [] ret: T.List[str] = [] remaining_args = [] for arg in arg_list: if isinstance(arg, InternalDependency): targets = [t for t in arg.sources if isinstance(t, VapiTarget)] for target in targets: srcdir = os.path.join(state.environment.get_source_dir(), target.get_subdir()) outdir = os.path.join(state.environment.get_build_dir(), target.get_subdir()) outfile = target.get_outputs()[0][:-5] # Strip .vapi ret.append('--vapidir=' + outdir) ret.append('--girdir=' + outdir) ret.append('--pkg=' + outfile) vapi_depends.append(target) vapi_packages.append(outfile) vapi_includes.append(srcdir) else: assert isinstance(arg, str), 'for mypy' vapi_packages.append(arg) remaining_args.append(arg) kwargs['packages'] = remaining_args vapi_args = ret + self._vapi_args_to_command('--pkg=', 'packages', kwargs, accept_vapi=True) return vapi_args, vapi_depends, vapi_packages, vapi_includes def _generate_deps(self, state: 'ModuleState', library: str, packages: T.List[str], install_dir: str) -> build.Data: outdir = state.environment.scratch_dir fname = os.path.join(outdir, library + '.deps') with open(fname, 'w', encoding='utf-8') as ofile: for package in packages: ofile.write(package + '\n') return build.Data([mesonlib.File(True, outdir, fname)], install_dir, install_dir, mesonlib.FileMode(), state.subproject) def 
_get_vapi_link_with(self, target: build.CustomTarget) -> T.List[T.Union[build.BuildTarget, build.CustomTarget]]:
        # Walk the target's dependency graph and collect every SharedLibrary
        # reachable directly or transitively through other GirTargets; a
        # consumer of the generated .vapi must link against all of them.
        link_with: T.List[T.Union[build.BuildTarget, build.CustomTarget]] = []
        for dep in target.get_target_dependencies():
            if isinstance(dep, build.SharedLibrary):
                link_with.append(dep)
            elif isinstance(dep, GirTarget):
                link_with += self._get_vapi_link_with(dep)
        return link_with

    @permittedKwargs({'sources', 'packages', 'metadata_dirs', 'gir_dirs', 'vapi_dirs', 'install', 'install_dir'})
    @typed_pos_args('gnome.generate_vapi', str)
    def generate_vapi(self, state: 'ModuleState', args: T.Tuple[str], kwargs) -> ModuleReturnValue:
        """Run vapigen over .gir files / Vala sources to produce <library>.vapi.

        Positional arg: the library name.  Returns an InternalDependency that
        carries the VapiTarget, its link_with libraries and include dirs, so
        user code can consume it like any other dependency.
        """
        created_values = []
        library = args[0]
        build_dir = os.path.join(state.environment.get_build_dir(), state.subdir)
        source_dir = os.path.join(state.environment.get_source_dir(), state.subdir)
        # 'packages' may mix plain strings and InternalDependencies; split
        # them apart and collect the extra vapigen args/depends they imply.
        pkg_cmd, vapi_depends, vapi_packages, vapi_includes = self._extract_vapi_packages(state, kwargs)
        cmd: T.List[T.Union[str, 'ExternalProgram']]
        cmd = [state.find_program('vapigen')]
        cmd += ['--quiet', '--library=' + library, '--directory=' + build_dir]
        cmd += self._vapi_args_to_command('--vapidir=', 'vapi_dirs', kwargs)
        cmd += self._vapi_args_to_command('--metadatadir=', 'metadata_dirs', kwargs)
        cmd += self._vapi_args_to_command('--girdir=', 'gir_dirs', kwargs)
        cmd += pkg_cmd
        cmd += ['--metadatadir=' + source_dir]
        if 'sources' not in kwargs:
            raise MesonException('sources are required to generate the vapi file')
        inputs = mesonlib.extract_as_list(kwargs, 'sources')
        link_with = []
        for i in inputs:
            if isinstance(i, str):
                # Plain source file path, relative to the current source dir.
                cmd.append(os.path.join(source_dir, i))
            elif isinstance(i, GirTarget):
                # A generated .gir: pick up its transitive link libraries and
                # feed the built file to vapigen.
                link_with += self._get_vapi_link_with(i)
                subdir = os.path.join(state.environment.get_build_dir(), i.get_subdir())
                gir_file = os.path.join(subdir, i.get_outputs()[0])
                cmd.append(gir_file)
            else:
                raise MesonException('Input must be a str or GirTarget')
        vapi_output = library + '.vapi'
        custom_kwargs = {
            'command': cmd,
            'input': inputs,
            'output': vapi_output,
            'depends': vapi_depends,
        }
        # Default install location: <datadir>/vala/vapi.
        install_dir = kwargs.get('install_dir', os.path.join(state.environment.coredata.get_option(mesonlib.OptionKey('datadir')),
                                                             'vala', 'vapi'))
        if kwargs.get('install'):
            custom_kwargs['install'] = kwargs['install']
            custom_kwargs['install_dir'] = install_dir
            # We shouldn't need this locally but we install it
            deps_target = self._generate_deps(state, library, vapi_packages, install_dir)
            created_values.append(deps_target)
        vapi_target = VapiTarget(vapi_output, state.subdir, state.subproject, custom_kwargs)
        # So to try our best to get this to just work we need:
        # - link with with the correct library
        # - include the vapi and dependent vapi files in sources
        # - add relevant directories to include dirs
        incs = [build.IncludeDirs(state.subdir, ['.'] + vapi_includes, False)]
        sources = [vapi_target] + vapi_depends
        rv = InternalDependency(None, incs, [], [], link_with, [], sources, [], {})
        created_values.append(rv)
        return ModuleReturnValue(rv, created_values)


def initialize(interp: 'Interpreter') -> GnomeModule:
    # Module entry point invoked by Meson's module loader.  The holder maps
    # let the CustomTarget subclasses this module produces be returned to
    # user meson.build code like ordinary custom_target() results.
    mod = GnomeModule(interp)
    mod.interpreter.append_holder_map(GResourceTarget, interpreter.CustomTargetHolder)
    mod.interpreter.append_holder_map(GResourceHeaderTarget, interpreter.CustomTargetHolder)
    mod.interpreter.append_holder_map(GirTarget, interpreter.CustomTargetHolder)
    mod.interpreter.append_holder_map(TypelibTarget, interpreter.CustomTargetHolder)
    mod.interpreter.append_holder_map(VapiTarget, interpreter.CustomTargetHolder)
    return mod
# Copyright 2015-2016 The Meson development team # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. '''This module provides helper functions for Gnome/GLib related functionality such as gobject-introspection, gresources and gtk-doc''' import copy import functools import os import subprocess import textwrap import typing as T from . import ExtensionModule from . import GResourceTarget, GResourceHeaderTarget, GirTarget, TypelibTarget, VapiTarget from . import ModuleReturnValue from .. import build from .. import interpreter from .. import mesonlib from .. import mlog from ..build import CustomTarget, CustomTargetIndex, GeneratedList, InvalidArguments from ..dependencies import Dependency, PkgConfigDependency, InternalDependency from ..interpreter.type_checking import DEPEND_FILES_KW, INSTALL_KW, NoneType, in_set_validator from ..interpreterbase import noPosargs, noKwargs, permittedKwargs, FeatureNew, FeatureDeprecatedKwargs from ..interpreterbase import typed_kwargs, KwargInfo, ContainerTypeInfo, FeatureDeprecated from ..interpreterbase.decorators import typed_pos_args from ..mesonlib import ( MachineChoice, MesonException, OrderedSet, Popen_safe, join_args, ) from ..programs import ExternalProgram, OverrideProgram if T.TYPE_CHECKING: from typing_extensions import Literal, TypedDict from . 
import ModuleState from ..compilers import Compiler from ..interpreter import Interpreter from ..interpreterbase import TYPE_var, TYPE_kwargs from ..mesonlib import FileOrString class PostInstall(TypedDict): glib_compile_schemas: bool gio_querymodules: T.List[str] gtk_update_icon_cache: bool update_desktop_database: bool class CompileSchemas(TypedDict): build_by_default: bool depend_files: T.List[FileOrString] class Yelp(TypedDict): languages: T.List[str] media: T.List[str] sources: T.List[str] symlink_media: bool class CompileResources(TypedDict): build_by_default: bool c_name: T.Optional[str] dependencies: T.List[T.Union[mesonlib.File, build.CustomTarget, build.CustomTargetIndex]] export: bool extra_args: T.List[str] gresource_bundle: bool install: bool install_dir: T.Optional[str] install_header: bool source_dir: T.List[str] class GenerateGir(TypedDict): build_by_default: bool dependencies: T.List[Dependency] export_packages: T.List[str] extra_args: T.List[str] fatal_warnings: bool header: T.List[str] identifier_prefix: T.List[str] include_directories: T.List[T.Union[build.IncludeDirs, str]] includes: T.List[T.Union[str, GirTarget]] install: bool install_dir_gir: T.Optional[str] install_dir_typelib: T.Optional[str] link_with: T.List[T.Union[build.SharedLibrary, build.StaticLibrary]] namespace: str nsversion: str sources: T.List[T.Union[FileOrString, build.GeneratedTypes]] symbol_prefix: T.List[str] class GtkDoc(TypedDict): src_dir: T.List[T.Union[str, build.IncludeDirs]] main_sgml: str main_xml: str module_version: str namespace: str mode: Literal['xml', 'smgl', 'auto', 'none'] html_args: T.List[str] scan_args: T.List[str] scanobjs_args: T.List[str] fixxref_args: T.List[str] mkdb_args: T.List[str] content_files: T.List[T.Union[build.GeneratedTypes, FileOrString]] ignore_headers: T.List[str] install_dir: T.List[str] check: bool install: bool gobject_typesfile: T.List[str] html_assets: T.List[str] expand_content_files: T.List[str] c_args: T.List[str] 
include_directories: T.List[T.Union[str, build.IncludeDirs]] dependencies: T.List[T.Union[Dependency, build.SharedLibrary, build.StaticLibrary]] class GdbusCodegen(TypedDict): sources: T.List[FileOrString] extra_args: T.List[str] interface_prefix: T.Optional[str] namespace: T.Optional[str] object_manager: bool build_by_default: bool annotations: T.List[str] install_header: bool install_dir: T.Optional[str] docbook: T.Optional[str] autocleanup: Literal['all', 'none', 'objects', 'default'] # Differs from the CustomTarget version in that it straight defaults to True _BUILD_BY_DEFAULT: KwargInfo[bool] = KwargInfo( 'build_by_default', bool, default=True, ) _EXTRA_ARGS_KW: KwargInfo[T.List[str]] = KwargInfo( 'extra_args', ContainerTypeInfo(list, str), default=[], listify=True, ) # gresource compilation is broken due to the way # the resource compiler and Ninja clash about it # # https://github.com/ninja-build/ninja/issues/1184 # https://bugzilla.gnome.org/show_bug.cgi?id=774368 gresource_dep_needed_version = '>= 2.51.1' native_glib_version = None class GnomeModule(ExtensionModule): def __init__(self, interpreter: 'Interpreter') -> None: super().__init__(interpreter) self.gir_dep = None self.install_glib_compile_schemas = False self.install_gio_querymodules = [] self.install_gtk_update_icon_cache = False self.install_update_desktop_database = False self.devenv = None self.methods.update({ 'post_install': self.post_install, 'compile_resources': self.compile_resources, 'generate_gir': self.generate_gir, 'compile_schemas': self.compile_schemas, 'yelp': self.yelp, 'gtkdoc': self.gtkdoc, 'gtkdoc_html_dir': self.gtkdoc_html_dir, 'gdbus_codegen': self.gdbus_codegen, 'mkenums': self.mkenums, 'mkenums_simple': self.mkenums_simple, 'genmarshal': self.genmarshal, 'generate_vapi': self.generate_vapi, }) @staticmethod def _get_native_glib_version(state: 'ModuleState') -> str: global native_glib_version if native_glib_version is None: glib_dep = PkgConfigDependency('glib-2.0', 
state.environment, {'native': True, 'required': False})
            if glib_dep.found():
                native_glib_version = glib_dep.get_version()
            else:
                # pkg-config lookup failed: fall back to a guess rather than
                # aborting configuration.
                mlog.warning('Could not detect glib version, assuming 2.54. '
                             'You may get build errors if your glib is older.')
                native_glib_version = '2.54'
        return native_glib_version

    @mesonlib.run_once
    def __print_gresources_warning(self, state: 'ModuleState') -> None:
        # Emitted at most once (run_once): old glib-compile-resources cannot
        # produce reliable dependency information for generated files.
        if not mesonlib.version_compare(self._get_native_glib_version(state),
                                        gresource_dep_needed_version):
            mlog.warning('GLib compiled dependencies do not work reliably with \n'
                         'the current version of GLib. See the following upstream issue:',
                         mlog.bold('https://bugzilla.gnome.org/show_bug.cgi?id=774368'))

    @staticmethod
    def _print_gdbus_warning() -> None:
        # once=True makes mlog deduplicate this across gdbus_codegen() calls.
        mlog.warning('Code generated with gdbus_codegen() requires the root directory be added to\n'
                     ' include_directories of targets with GLib < 2.51.3:',
                     mlog.bold('https://github.com/mesonbuild/meson/issues/1387'),
                     once=True)

    def _get_dep(self, state: 'ModuleState', depname: str, native: bool = False,
                 required: bool = True) -> Dependency:
        # Thin wrapper over the interpreter's dependency() lookup.
        kwargs = {'native': native, 'required': required}
        return self.interpreter.func_dependency(state.current_node, [depname], kwargs)

    def _get_native_binary(self, state: 'ModuleState', name: str, depname: str,
                           varname: str, required: bool = True
                           ) -> T.Union[ExternalProgram, OverrideProgram, 'build.Executable']:
        # Resolve a build-time tool, trying increasingly generic sources:
        # overrides -> machine file -> pkg-config variable -> PATH lookup.
        # Look in overrides in case glib/gtk/etc are built as subproject
        prog = self.interpreter.program_from_overrides([name], [])
        if prog is not None:
            return prog
        # Look in machine file
        prog_list = state.environment.lookup_binary_entry(MachineChoice.HOST, name)
        if prog_list is not None:
            return ExternalProgram.from_entry(name, prog_list)
        # Check if pkgconfig has a variable
        dep = self._get_dep(state, depname, native=True, required=False)
        if dep.found() and dep.type_name == 'pkgconfig':
            value = dep.get_pkgconfig_variable(varname, {})
            if value:
                return ExternalProgram(name, [value])
        # Normal program lookup
        return
state.find_program(name, required=required) @typed_kwargs('gnome.post_install', KwargInfo('glib_compile_schemas', bool, default=False), KwargInfo('gio_querymodules', ContainerTypeInfo(list, str), default=[], listify=True), KwargInfo('gtk_update_icon_cache', bool, default=False), KwargInfo('update_desktop_database', bool, default=False, since='0.59.0'), ) @noPosargs @FeatureNew('gnome.post_install', '0.57.0') def post_install(self, state: 'ModuleState', args: T.List['TYPE_var'], kwargs: 'PostInstall') -> ModuleReturnValue: rv: T.List['build.ExecutableSerialisation'] = [] datadir_abs = os.path.join(state.environment.get_prefix(), state.environment.get_datadir()) if kwargs['glib_compile_schemas'] and not self.install_glib_compile_schemas: self.install_glib_compile_schemas = True prog = self._get_native_binary(state, 'glib-compile-schemas', 'gio-2.0', 'glib_compile_schemas') schemasdir = os.path.join(datadir_abs, 'glib-2.0', 'schemas') script = state.backend.get_executable_serialisation([prog, schemasdir]) script.skip_if_destdir = True rv.append(script) for d in kwargs['gio_querymodules']: if d not in self.install_gio_querymodules: self.install_gio_querymodules.append(d) prog = self._get_native_binary(state, 'gio-querymodules', 'gio-2.0', 'gio_querymodules') moduledir = os.path.join(state.environment.get_prefix(), d) script = state.backend.get_executable_serialisation([prog, moduledir]) script.skip_if_destdir = True rv.append(script) if kwargs['gtk_update_icon_cache'] and not self.install_gtk_update_icon_cache: self.install_gtk_update_icon_cache = True prog = self._get_native_binary(state, 'gtk4-update-icon-cache', 'gtk-4.0', 'gtk4_update_icon_cache', required=False) found = isinstance(prog, build.Executable) or prog.found() if not found: prog = self._get_native_binary(state, 'gtk-update-icon-cache', 'gtk+-3.0', 'gtk_update_icon_cache') icondir = os.path.join(datadir_abs, 'icons', 'hicolor') script = state.backend.get_executable_serialisation([prog, '-q', '-t', '-f', 
icondir]) script.skip_if_destdir = True rv.append(script) if kwargs['update_desktop_database'] and not self.install_update_desktop_database: self.install_update_desktop_database = True prog = self._get_native_binary(state, 'update-desktop-database', 'desktop-file-utils', 'update_desktop_database') appdir = os.path.join(datadir_abs, 'applications') script = state.backend.get_executable_serialisation([prog, '-q', appdir]) script.skip_if_destdir = True rv.append(script) return ModuleReturnValue(None, rv) @typed_pos_args('gnome.compile_resources', str, (str, mesonlib.File)) @typed_kwargs( 'gnome.compile_resources', _BUILD_BY_DEFAULT, _EXTRA_ARGS_KW, INSTALL_KW, INSTALL_KW.evolve(name='install_header', since='0.37.0'), KwargInfo('c_name', (str, NoneType)), KwargInfo('dependencies', ContainerTypeInfo(list, (mesonlib.File, build.CustomTarget, build.CustomTargetIndex)), default=[], listify=True), KwargInfo('export', bool, default=False, since='0.37.0'), KwargInfo('gresource_bundle', bool, default=False, since='0.37.0'), KwargInfo('install_dir', (str, NoneType)), KwargInfo('source_dir', ContainerTypeInfo(list, str), default=[], listify=True), ) def compile_resources(self, state: 'ModuleState', args: T.Tuple[str, 'FileOrString'], kwargs: 'CompileResources') -> 'ModuleReturnValue': self.__print_gresources_warning(state) glib_version = self._get_native_glib_version(state) glib_compile_resources = state.find_program('glib-compile-resources') cmd = [glib_compile_resources, '@INPUT@'] source_dirs = kwargs['source_dir'] dependencies = kwargs['dependencies'] target_name, input_file = args # Validate dependencies subdirs: T.List[str] = [] depends: T.List[T.Union[build.CustomTarget, build.CustomTargetIndex]] = [] for dep in dependencies: if isinstance(dep, mesonlib.File): subdirs.append(dep.subdir) else: depends.append(dep) subdirs.append(dep.get_subdir()) if not mesonlib.version_compare(glib_version, gresource_dep_needed_version): m = 'The "dependencies" argument of 
gnome.compile_resources() can not\n' \ 'be used with the current version of glib-compile-resources due to\n' \ '<https://bugzilla.gnome.org/show_bug.cgi?id=774368>' raise MesonException(m) if not mesonlib.version_compare(glib_version, gresource_dep_needed_version): # Resource xml files generated at build-time cannot be used with # gnome.compile_resources() because we need to scan the xml for # dependencies. Use configure_file() instead to generate it at # configure-time if isinstance(input_file, mesonlib.File): # glib-compile-resources will be run inside the source dir, # so we need either 'src_to_build' or the absolute path. # Absolute path is the easiest choice. if input_file.is_built: ifile = os.path.join(state.environment.get_build_dir(), input_file.subdir, input_file.fname) else: ifile = os.path.join(input_file.subdir, input_file.fname) else: ifile = os.path.join(state.subdir, input_file) depend_files, depends, subdirs = self._get_gresource_dependencies( state, ifile, source_dirs, dependencies) # Make source dirs relative to build dir now source_dirs = [os.path.join(state.build_to_src, state.subdir, d) for d in source_dirs] # Ensure build directories of generated deps are included source_dirs += subdirs # Always include current directory, but after paths set by user source_dirs.append(os.path.join(state.build_to_src, state.subdir)) for source_dir in OrderedSet(source_dirs): cmd += ['--sourcedir', source_dir] if kwargs['c_name']: cmd += ['--c-name', kwargs['c_name']] if not kwargs['export']: cmd += ['--internal'] cmd += ['--generate', '--target', '@OUTPUT@'] cmd += kwargs['extra_args'] gresource = kwargs['gresource_bundle'] if gresource: output = f'{target_name}.gresource' name = f'{target_name}_gresource' else: if 'c' in state.environment.coredata.compilers.host: output = f'{target_name}.c' name = f'{target_name}_c' elif 'cpp' in state.environment.coredata.compilers.host: output = f'{target_name}.cpp' name = f'{target_name}_cpp' else: raise 
MesonException('Compiling GResources into code is only supported in C and C++ projects') if kwargs['install'] and not gresource: raise MesonException('The install kwarg only applies to gresource bundles, see install_header') install_header = kwargs['install_header'] if install_header and gresource: raise MesonException('The install_header kwarg does not apply to gresource bundles') if install_header and not kwargs['export']: raise MesonException('GResource header is installed yet export is not enabled') c_kwargs: T.Dict[str, T.Any] = { 'build_by_default': kwargs['build_by_default'], 'depends': depends, 'input': input_file, 'install': kwargs['install'], 'install_dir': kwargs['install_dir'] or [], 'output': output, } if not mesonlib.version_compare(glib_version, gresource_dep_needed_version): # This will eventually go out of sync if dependencies are added c_kwargs['depend_files'] = depend_files c_kwargs['command'] = cmd else: depfile = f'{output}.d' c_kwargs['depfile'] = depfile c_kwargs['command'] = copy.copy(cmd) + ['--dependency-file', '@DEPFILE@'] target_c = GResourceTarget(name, state.subdir, state.subproject, c_kwargs) if gresource: # Only one target for .gresource files return ModuleReturnValue(target_c, [target_c]) h_kwargs: T.Dict[str, T.Any] = { 'command': cmd, 'input': input_file, 'output': f'{target_name}.h', # The header doesn't actually care about the files yet it errors if missing 'depends': depends, 'build_by_default': kwargs['build_by_default'], 'install_dir': kwargs['install_dir'] or [state.environment.coredata.get_option(mesonlib.OptionKey('includedir'))], } if install_header: h_kwargs['install'] = install_header target_h = GResourceHeaderTarget(f'{target_name}_h', state.subdir, state.subproject, h_kwargs) rv = [target_c, target_h] return ModuleReturnValue(rv, rv) def _get_gresource_dependencies( self, state: 'ModuleState', input_file: str, source_dirs: T.List[str], dependencies: T.Sequence[T.Union[mesonlib.File, build.CustomTarget, 
build.CustomTargetIndex]] ) -> T.Tuple[T.List[mesonlib.FileOrString], T.List[T.Union[build.CustomTarget, build.CustomTargetIndex]], T.List[str]]: cmd = ['glib-compile-resources', input_file, '--generate-dependencies'] # Prefer generated files over source files cmd += ['--sourcedir', state.subdir] # Current build dir for source_dir in source_dirs: cmd += ['--sourcedir', os.path.join(state.subdir, source_dir)] try: pc, stdout, stderr = Popen_safe(cmd, cwd=state.environment.get_source_dir()) except (FileNotFoundError, PermissionError): raise MesonException('Could not execute glib-compile-resources.') if pc.returncode != 0: m = f'glib-compile-resources failed to get dependencies for {cmd[1]}:\n{stderr}' mlog.warning(m) raise subprocess.CalledProcessError(pc.returncode, cmd) raw_dep_files: T.List[str] = stdout.split('\n')[:-1] depends: T.List[T.Union[build.CustomTarget, build.CustomTargetIndex]] = [] subdirs: T.List[str] = [] dep_files: T.List[mesonlib.FileOrString] = [] for resfile in raw_dep_files.copy(): resbasename = os.path.basename(resfile) for dep in dependencies: if isinstance(dep, mesonlib.File): if dep.fname != resbasename: continue raw_dep_files.remove(resfile) dep_files.append(dep) subdirs.append(dep.subdir) break elif isinstance(dep, (build.CustomTarget, build.CustomTargetIndex)): fname = None outputs = {(o, os.path.basename(o)) for o in dep.get_outputs()} for o, baseo in outputs: if baseo == resbasename: fname = o break if fname is not None: raw_dep_files.remove(resfile) depends.append(dep) subdirs.append(dep.get_subdir()) break else: # In generate-dependencies mode, glib-compile-resources doesn't raise # an error for missing resources but instead prints whatever filename # was listed in the input file. That's good because it means we can # handle resource files that get generated as part of the build, as # follows. # # If there are multiple generated resource files with the same basename # then this code will get confused. 
def _get_link_args(self, state: 'ModuleState',
                   lib: T.Union[build.SharedLibrary, build.StaticLibrary],
                   depends: T.List[build.BuildTarget],
                   include_rpath: bool = False,
                   use_gir_args: bool = False) -> T.List[str]:
    """Build the linker arguments needed to link against *lib*.

    Appends *lib* (and nothing else) to *depends* when it is a shared
    library.  With ``use_gir_args`` and a capable g-ir-scanner, emits
    ``--extra-library=`` instead of ``-l``.
    """
    link_command: T.List[str] = []
    # Construct link args
    if isinstance(lib, build.SharedLibrary):
        libdir = os.path.join(state.environment.get_build_dir(), state.backend.get_target_dir(lib))
        link_command.append('-L' + libdir)
        if include_rpath:
            link_command.append('-Wl,-rpath,' + libdir)
        depends.append(lib)
        # Needed for the following binutils bug:
        # https://github.com/mesonbuild/meson/issues/1911
        # However, g-ir-scanner does not understand -Wl,-rpath
        # so we need to use -L instead
        for d in state.backend.determine_rpath_dirs(lib):
            d = os.path.join(state.environment.get_build_dir(), d)
            link_command.append('-L' + d)
            if include_rpath:
                link_command.append('-Wl,-rpath,' + d)
    if use_gir_args and self._gir_has_option('--extra-library'):
        link_command.append('--extra-library=' + lib.name)
    else:
        link_command.append('-l' + lib.name)
    return link_command

def _get_dependencies_flags(
        self, deps: T.Sequence[T.Union['Dependency', build.SharedLibrary, build.StaticLibrary]],
        state: 'ModuleState',
        depends: T.List[build.BuildTarget],
        include_rpath: bool = False,
        use_gir_args: bool = False,
        separate_nodedup: bool = False
        ) -> T.Tuple[OrderedSet[str], OrderedSet[str], OrderedSet[str], T.Optional[T.List[str]], OrderedSet[str]]:
    """Recursively collect compile/link flags and gir include dirs for *deps*.

    Returns ``(cflags, internal_ldflags, external_ldflags,
    external_ldflags_nodedup_or_None, gi_includes)``.  Mutates *depends* by
    appending build targets that must exist before scanning.  When
    ``separate_nodedup`` is False the non-dedupable flags are merged into
    ``external_ldflags`` and the fourth element is None.
    """
    cflags: OrderedSet[str] = OrderedSet()
    internal_ldflags: OrderedSet[str] = OrderedSet()
    external_ldflags: OrderedSet[str] = OrderedSet()
    # External linker flags that can't be de-duped reliably because they
    # require two args in order, such as -framework AVFoundation
    external_ldflags_nodedup: T.List[str] = []
    gi_includes: OrderedSet[str] = OrderedSet()
    deps = mesonlib.listify(deps)

    for dep in deps:
        if isinstance(dep, Dependency):
            girdir = dep.get_variable(pkgconfig='girdir', internal='girdir', default_value='')
            if girdir:
                assert isinstance(girdir, str), 'for mypy'
                gi_includes.update([girdir])
        if isinstance(dep, InternalDependency):
            cflags.update(dep.get_compile_args())
            cflags.update(state.get_include_args(dep.include_directories))
            for lib in dep.libraries:
                if isinstance(lib, build.SharedLibrary):
                    internal_ldflags.update(self._get_link_args(state, lib, depends, include_rpath))
                    # Recurse into the library's own external deps; pass
                    # separate_nodedup=True so ordering of paired flags survives.
                    libdepflags = self._get_dependencies_flags(lib.get_external_deps(), state, depends, include_rpath,
                                                               use_gir_args, True)
                    cflags.update(libdepflags[0])
                    internal_ldflags.update(libdepflags[1])
                    external_ldflags.update(libdepflags[2])
                    external_ldflags_nodedup += libdepflags[3]
                    gi_includes.update(libdepflags[4])
            extdepflags = self._get_dependencies_flags(dep.ext_deps, state, depends, include_rpath,
                                                       use_gir_args, True)
            cflags.update(extdepflags[0])
            internal_ldflags.update(extdepflags[1])
            external_ldflags.update(extdepflags[2])
            external_ldflags_nodedup += extdepflags[3]
            gi_includes.update(extdepflags[4])
            for source in dep.sources:
                if isinstance(source, GirTarget):
                    gi_includes.update([os.path.join(state.environment.get_build_dir(), source.get_subdir())])
        # This should be any dependency other than an internal one.
        elif isinstance(dep, Dependency):
            cflags.update(dep.get_compile_args())
            # Use an explicit iterator so '-framework <name>' can consume two items.
            ldflags = iter(dep.get_link_args(raw=True))
            for flag in ldflags:
                if (os.path.isabs(flag) and
                        # For PkgConfigDependency only:
                        getattr(dep, 'is_libtool', False)):
                    lib_dir = os.path.dirname(flag)
                    external_ldflags.update([f'-L{lib_dir}'])
                    if include_rpath:
                        external_ldflags.update([f'-Wl,-rpath {lib_dir}'])
                    libname = os.path.basename(flag)
                    if libname.startswith("lib"):
                        libname = libname[3:]
                    libname = libname.split(".so")[0]
                    flag = f"-l{libname}"
                # FIXME: Hack to avoid passing some compiler options in
                if flag.startswith("-W"):
                    continue
                # If it's a framework arg, slurp the framework name too
                # to preserve the order of arguments
                if flag == '-framework':
                    external_ldflags_nodedup += [flag, next(ldflags)]
                else:
                    external_ldflags.update([flag])
        elif isinstance(dep, (build.StaticLibrary, build.SharedLibrary)):
            cflags.update(state.get_include_args(dep.get_include_dirs()))
            depends.append(dep)
        else:
            mlog.log(f'dependency {dep!r} not handled to build gir files')
            continue

    if use_gir_args and self._gir_has_option('--extra-library'):
        def fix_ldflags(ldflags: T.Iterable[str]) -> OrderedSet[str]:
            # Rewrite '-lfoo' into '--extra-library=foo' for g-ir-scanner.
            fixed_ldflags: OrderedSet[str] = OrderedSet()
            for ldflag in ldflags:
                if ldflag.startswith("-l"):
                    ldflag = ldflag.replace('-l', '--extra-library=', 1)
                fixed_ldflags.add(ldflag)
            return fixed_ldflags
        internal_ldflags = fix_ldflags(internal_ldflags)
        external_ldflags = fix_ldflags(external_ldflags)
    if not separate_nodedup:
        external_ldflags.update(external_ldflags_nodedup)
        return cflags, internal_ldflags, external_ldflags, None, gi_includes
    else:
        return cflags, internal_ldflags, external_ldflags, external_ldflags_nodedup, gi_includes
def _unwrap_gir_target(self, girtarget: T.Union[build.Executable, build.StaticLibrary, build.SharedLibrary],
                       state: 'ModuleState') -> T.Union[build.Executable, build.StaticLibrary, build.SharedLibrary]:
    """Validate that *girtarget* is an introspectable target and return it.

    Raises MesonException for unsupported target types, and for static
    libraries when the available gobject-introspection is too old.
    """
    if not isinstance(girtarget, (build.Executable, build.SharedLibrary,
                                  build.StaticLibrary)):
        raise MesonException(f'Gir target must be an executable or library but is "{girtarget}" of type {type(girtarget).__name__}')

    STATIC_BUILD_REQUIRED_VERSION = ">=1.58.1"
    if isinstance(girtarget, (build.StaticLibrary)) and \
       not mesonlib.version_compare(
           self._get_gir_dep(state)[0].get_version(),
           STATIC_BUILD_REQUIRED_VERSION):
        raise MesonException('Static libraries can only be introspected with GObject-Introspection ' + STATIC_BUILD_REQUIRED_VERSION)

    return girtarget

def _devenv_prepend(self, varname: str, value: str) -> None:
    """Prepend *value* to environment variable *varname* in the dev environment.

    Lazily creates self.devenv on first use and registers it with the
    interpreter's build object.
    """
    if self.devenv is None:
        self.devenv = build.EnvironmentVariables()
        self.interpreter.build.devenv.append(self.devenv)
    self.devenv.prepend(varname, [value])

def _get_gir_dep(self, state: 'ModuleState') -> T.Tuple[Dependency, T.Union[build.Executable, 'ExternalProgram', 'OverrideProgram'],
                                                        T.Union[build.Executable, 'ExternalProgram', 'OverrideProgram']]:
    """Return (gobject-introspection dep, g-ir-scanner, g-ir-compiler).

    The lookups are cached on self; subsequent calls reuse the first result.
    """
    if not self.gir_dep:
        self.gir_dep = self._get_dep(state, 'gobject-introspection-1.0')
        self.giscanner = self._get_native_binary(state, 'g-ir-scanner', 'gobject-introspection-1.0', 'g_ir_scanner')
        self.gicompiler = self._get_native_binary(state, 'g-ir-compiler', 'gobject-introspection-1.0', 'g_ir_compiler')
    return self.gir_dep, self.giscanner, self.gicompiler

@functools.lru_cache(maxsize=None)
def _gir_has_option(self, option: str) -> bool:
    """Probe (once per option, via lru_cache) whether g-ir-scanner supports *option*."""
    exe = self.giscanner
    if isinstance(exe, OverrideProgram):
        # Handle overridden g-ir-scanner
        assert option in {'--extra-library', '--sources-top-dirs'}
        return True
    p, o, _ = Popen_safe(exe.get_command() + ['--help'], stderr=subprocess.STDOUT)
    return p.returncode == 0 and option in o

# May mutate depends and gir_inc_dirs
def _scan_include(self, state: 'ModuleState', includes: T.List[T.Union[str, GirTarget]]
                  ) -> T.Tuple[T.List[str], T.List[str], T.List[GirTarget]]:
    """Turn gir 'includes' into scanner args.

    Returns (scanner args, build-dir include dirs for uninstalled girs,
    GirTargets to depend on).
    """
    ret: T.List[str] = []
    gir_inc_dirs: T.List[str] = []
    depends: T.List[GirTarget] = []

    for inc in includes:
        if isinstance(inc, str):
            ret += [f'--include={inc}']
        elif isinstance(inc, GirTarget):
            gir_inc_dirs.append(os.path.join(state.environment.get_build_dir(), inc.get_subdir()))
            ret.append(f"--include-uninstalled={os.path.join(inc.get_subdir(), inc.get_basename())}")
            depends.append(inc)

    return ret, gir_inc_dirs, depends
def _scan_langs(self, state: 'ModuleState', langs: T.Iterable[str]) -> T.List[str]:
    """Collect the '-L' entries from the external link args of each language."""
    ret: T.List[str] = []

    for lang in langs:
        link_args = state.environment.coredata.get_external_link_args(MachineChoice.HOST, lang)
        for link_arg in link_args:
            if link_arg.startswith('-L'):
                ret.append(link_arg)

    return ret

def _scan_gir_targets(self, state: 'ModuleState', girtargets: T.List[build.BuildTarget]) -> T.List[T.Union[str, build.Executable]]:
    """Build the --program/--library portion of the g-ir-scanner command line."""
    ret: T.List[T.Union[str, build.Executable]] = []

    for girtarget in girtargets:
        if isinstance(girtarget, build.Executable):
            ret += ['--program', girtarget]
        else:
            # Because of https://gitlab.gnome.org/GNOME/gobject-introspection/merge_requests/72
            # we can't use the full path until this is merged.
            libpath = os.path.join(girtarget.get_subdir(), girtarget.get_filename())
            # Must use absolute paths here because g-ir-scanner will not
            # add them to the runtime path list if they're relative. This
            # means we cannot use @BUILD_ROOT@
            build_root = state.environment.get_build_dir()
            if isinstance(girtarget, build.SharedLibrary):
                # need to put our output directory first as we need to use the
                # generated libraries instead of any possibly installed system/prefix
                # ones.
                ret += ["-L{}/{}".format(build_root, os.path.dirname(libpath))]
                libname = girtarget.get_basename()
            else:
                libname = os.path.join(f"{build_root}/{libpath}")
            ret += ['--library', libname]
            # Needed for the following binutils bug:
            # https://github.com/mesonbuild/meson/issues/1911
            # However, g-ir-scanner does not understand -Wl,-rpath
            # so we need to use -L instead
            for d in state.backend.determine_rpath_dirs(girtarget):
                d = os.path.join(state.environment.get_build_dir(), d)
                ret.append('-L' + d)

    return ret

def _get_girtargets_langs_compilers(self, girtargets: T.Sequence[build.BuildTarget]) -> T.List[T.Tuple[str, 'Compiler']]:
    """Return one (language, compiler) pair per target, for g-i capable languages."""
    ret: T.List[T.Tuple[str, 'Compiler']] = []
    for girtarget in girtargets:
        for lang, compiler in girtarget.compilers.items():
            # XXX: Can you use g-i with any other language?
            if lang in ('c', 'cpp', 'objc', 'objcpp', 'd'):
                ret.append((lang, compiler))
                break

    return ret

def _get_gir_targets_deps(self, girtargets: T.Sequence[build.BuildTarget]
                          ) -> T.List[T.Union[build.Target, Dependency]]:
    """Gather link deps and external deps of every target, in order."""
    ret: T.List[T.Union[build.Target, Dependency]] = []
    for girtarget in girtargets:
        ret += girtarget.get_all_link_deps()
        ret += girtarget.get_external_deps()
    return ret

def _get_gir_targets_inc_dirs(self, girtargets: T.List[build.BuildTarget]) -> T.List[build.IncludeDirs]:
    """Concatenate the include directories of all *girtargets*."""
    ret: T.List[build.IncludeDirs] = []
    for girtarget in girtargets:
        ret += girtarget.get_include_dirs()
    return ret

def _get_langs_compilers_flags(self, state: 'ModuleState', langs_compilers: T.List[T.Tuple[str, 'Compiler']]
                               ) -> T.Tuple[T.List[str], T.List[str], T.List[str]]:
    """Collect per-language global/project cflags plus sanitizer flags.

    Returns (cflags, internal_ldflags, external_ldflags); the external list
    is currently always empty but kept for a uniform interface.
    """
    cflags: T.List[str] = []
    internal_ldflags: T.List[str] = []
    external_ldflags: T.List[str] = []

    for lang, compiler in langs_compilers:
        if state.global_args.get(lang):
            cflags += state.global_args[lang]
        if state.project_args.get(lang):
            cflags += state.project_args[lang]
        if mesonlib.OptionKey('b_sanitize') in compiler.base_options:
            sanitize = state.environment.coredata.options[mesonlib.OptionKey('b_sanitize')].value
            cflags += compiler.sanitizer_compile_args(sanitize)
            sanitize = sanitize.split(',')
            # These must be first in ldflags
            if 'address' in sanitize:
                internal_ldflags += ['-lasan']
            if 'thread' in sanitize:
                internal_ldflags += ['-ltsan']
            if 'undefined' in sanitize:
                internal_ldflags += ['-lubsan']
            # FIXME: Linking directly to lib*san is not recommended but g-ir-scanner
            # does not understand -f LDFLAGS. https://bugzilla.gnome.org/show_bug.cgi?id=783892
            # ldflags += compiler.sanitizer_link_args(sanitize)

    return cflags, internal_ldflags, external_ldflags
def _make_gir_filelist(self, state: 'ModuleState', srcdir: str, ns: str,
                       nsversion: str, girtargets: T.List[build.BuildTarget],
                       libsources: T.Sequence[T.Union[
                           str, mesonlib.File, build.GeneratedList,
                           build.CustomTarget, build.CustomTargetIndex]]
                       ) -> str:
    """Write the list of source files to scan into a filelist and return its path.

    The file is placed in the first target's private dir and named
    '<ns>_<nsversion>_gir_filelist', one absolute/relative path per line.
    """
    gir_filelist_dir = state.backend.get_target_private_dir_abs(girtargets[0])
    if not os.path.isdir(gir_filelist_dir):
        os.mkdir(gir_filelist_dir)
    gir_filelist_filename = os.path.join(gir_filelist_dir, f'{ns}_{nsversion}_gir_filelist')

    with open(gir_filelist_filename, 'w', encoding='utf-8') as gir_filelist:
        for s in libsources:
            if isinstance(s, (build.CustomTarget, build.CustomTargetIndex)):
                # Generated outputs live under the build dir.
                for custom_output in s.get_outputs():
                    gir_filelist.write(os.path.join(state.environment.get_build_dir(),
                                                    state.backend.get_target_dir(s),
                                                    custom_output) + '\n')
            elif isinstance(s, mesonlib.File):
                gir_filelist.write(s.rel_to_builddir(state.build_to_src) + '\n')
            elif isinstance(s, build.GeneratedList):
                for gen_src in s.get_outputs():
                    gir_filelist.write(os.path.join(srcdir, gen_src) + '\n')
            else:
                gir_filelist.write(os.path.join(srcdir, s) + '\n')

    return gir_filelist_filename

def _make_gir_target(
        self,
        state: 'ModuleState',
        girfile: str,
        scan_command: T.List[str],
        generated_files: T.Sequence[T.Union[str, mesonlib.File, build.CustomTarget, build.CustomTargetIndex, build.GeneratedList]],
        depends: T.List[build.Target],
        kwargs: T.Dict[str, T.Any]) -> GirTarget:
    """Create the custom target that runs g-ir-scanner to produce *girfile*."""
    scankwargs: T.Dict[str, T.Any] = {
        'input': generated_files,
        'output': girfile,
        'command': scan_command,
        'depends': depends,
        'install': kwargs['install'],
        # Default install location for .gir files is <datadir>/gir-1.0.
        'install_dir': kwargs['install_dir_gir'] or
        os.path.join(state.environment.get_datadir(), 'gir-1.0'),
        'install_tag': 'devel',
        'build_by_default': kwargs['build_by_default'],
    }

    return GirTarget(girfile, state.subdir, state.subproject, scankwargs)

def _make_typelib_target(self, state: 'ModuleState', typelib_output: str,
                         typelib_cmd: T.List[str],
                         generated_files: T.Sequence[T.Union[str, mesonlib.File, build.CustomTarget, build.CustomTargetIndex, build.GeneratedList]],
                         kwargs: T.Dict[str, T.Any]) -> TypelibTarget:
    """Create the custom target that runs g-ir-compiler to produce the typelib."""
    typelib_kwargs: T.Dict[str, T.Any] = {
        'input': generated_files,
        'output': [typelib_output],
        'command': typelib_cmd,
        'install': kwargs['install'],
        # Default install location for typelibs is <libdir>/girepository-1.0.
        'install_dir': kwargs['install_dir_typelib'] or
        os.path.join(state.environment.get_libdir(), 'girepository-1.0'),
        'install_tag': 'typelib',
        'build_by_default': kwargs['build_by_default'],
    }

    return TypelibTarget(typelib_output, state.subdir, state.subproject, typelib_kwargs)

# May mutate depends
def _gather_typelib_includes_and_update_depends(
        self, state: 'ModuleState',
        deps: T.List[Dependency],
        depends: T.List[build.Target]) -> T.List[str]:
    """Collect include dirs for typelib generation; appends GirTargets to *depends*."""
    # Need to recursively add deps on GirTarget sources from our
    # dependencies and also find the include directories needed for the
    # typelib generation custom target below.
    typelib_includes: T.List[str] = []
    for dep in deps:
        # Add a dependency on each GirTarget listed in dependencies and add
        # the directory where it will be generated to the typelib includes
        if isinstance(dep, InternalDependency):
            for source in dep.sources:
                if isinstance(source, GirTarget) and source not in depends:
                    depends.append(source)
                    subdir = os.path.join(state.environment.get_build_dir(),
                                          source.get_subdir())
                    if subdir not in typelib_includes:
                        typelib_includes.append(subdir)
        # Do the same, but for dependencies of dependencies. These are
        # stored in the list of generated sources for each link dep (from
        # girtarget.get_all_link_deps() above).
        # FIXME: Store this in the original form from declare_dependency()
        # so it can be used here directly.
        elif isinstance(dep, build.SharedLibrary):
            for source in dep.generated:
                if isinstance(source, GirTarget):
                    subdir = os.path.join(state.environment.get_build_dir(),
                                          source.get_subdir())
                    if subdir not in typelib_includes:
                        typelib_includes.append(subdir)
        if isinstance(dep, Dependency):
            girdir = dep.get_variable(pkgconfig='girdir', internal='girdir', default_value='')
            assert isinstance(girdir, str), 'for mypy'
            if girdir and girdir not in typelib_includes:
                typelib_includes.append(girdir)
    return typelib_includes
def _get_external_args_for_langs(self, state: 'ModuleState', langs: T.Sequence[str]) -> T.List[str]:
    """Collect the environment-provided external compile args for each language in *langs*."""
    collected: T.List[str] = []
    for language in langs:
        external = state.environment.coredata.get_external_args(MachineChoice.HOST, language)
        collected.extend(mesonlib.listify(external))
    return collected

@staticmethod
def _get_scanner_cflags(cflags: T.Iterable[str]) -> T.Iterable[str]:
    'g-ir-scanner only accepts -I/-D/-U; must ignore all other flags'
    for flag in cflags:
        # _FORTIFY_SOURCE depends on / works together with -O, on the other hand this
        # just invokes the preprocessor anyway
        is_preprocessor_flag = flag.startswith(('-D', '-U', '-I'))
        if is_preprocessor_flag and not flag.startswith('-D_FORTIFY_SOURCE'):
            yield flag

@staticmethod
def _get_scanner_ldflags(ldflags: T.Iterable[str]) -> T.Iterable[str]:
    'g-ir-scanner only accepts -L/-l; must ignore -F and other linker flags'
    accepted_prefixes = ('-L', '-l', '--extra-library')
    for flag in ldflags:
        if flag.startswith(accepted_prefixes):
            yield flag
@typed_pos_args('gnome.generate_gir', varargs=(build.Executable, build.SharedLibrary, build.StaticLibrary), min_varargs=1)
@typed_kwargs(
    'gnome.generate_gir',
    INSTALL_KW,
    _BUILD_BY_DEFAULT.evolve(since='0.40.0'),
    _EXTRA_ARGS_KW,
    KwargInfo('dependencies', ContainerTypeInfo(list, Dependency), default=[], listify=True),
    KwargInfo('export_packages', ContainerTypeInfo(list, str), default=[], listify=True),
    KwargInfo('fatal_warnings', bool, default=False, since='0.55.0'),
    KwargInfo('header', ContainerTypeInfo(list, str), default=[], listify=True),
    KwargInfo('identifier_prefix', ContainerTypeInfo(list, str), default=[], listify=True),
    KwargInfo('include_directories', ContainerTypeInfo(list, (str, build.IncludeDirs)), default=[], listify=True),
    KwargInfo('includes', ContainerTypeInfo(list, (str, GirTarget)), default=[], listify=True),
    KwargInfo('install_dir_gir', (str, NoneType)),
    KwargInfo('install_dir_typelib', (str, NoneType)),
    KwargInfo('link_with', ContainerTypeInfo(list, (build.SharedLibrary, build.StaticLibrary)), default=[], listify=True),
    KwargInfo('namespace', str, required=True),
    KwargInfo('nsversion', str, required=True),
    KwargInfo('sources', ContainerTypeInfo(list, (str, mesonlib.File, build.GeneratedList, build.CustomTarget, build.CustomTargetIndex)), default=[], listify=True),
    KwargInfo('symbol_prefix', ContainerTypeInfo(list, str), default=[], listify=True),
)
def generate_gir(self, state: 'ModuleState', args: T.Tuple[T.List[T.Union[build.Executable, build.SharedLibrary, build.StaticLibrary]]],
                 kwargs: 'GenerateGir') -> ModuleReturnValue:
    """gnome.generate_gir(): produce a .gir (g-ir-scanner) and .typelib (g-ir-compiler) pair.

    Returns a ModuleReturnValue with [gir target, typelib target].
    """
    girtargets = [self._unwrap_gir_target(arg, state) for arg in args[0]]
    if len(girtargets) > 1 and any([isinstance(el, build.Executable) for el in girtargets]):
        raise MesonException('generate_gir only accepts a single argument when one of the arguments is an executable')

    gir_dep, giscanner, gicompiler = self._get_gir_dep(state)

    ns = kwargs['namespace']
    nsversion = kwargs['nsversion']
    libsources = kwargs['sources']

    girfile = f'{ns}-{nsversion}.gir'
    srcdir = os.path.join(state.environment.get_source_dir(), state.subdir)
    builddir = os.path.join(state.environment.get_build_dir(), state.subdir)

    depends: T.List[T.Union['FileOrString', build.GeneratedTypes, build.Executable, build.SharedLibrary, build.StaticLibrary]] = []
    depends.extend(gir_dep.sources)
    depends.extend(girtargets)

    # Gather every compile/link flag the scanner needs, from the targets
    # themselves, their dependencies, and the external environment.
    langs_compilers = self._get_girtargets_langs_compilers(girtargets)
    cflags, internal_ldflags, external_ldflags = self._get_langs_compilers_flags(state, langs_compilers)
    deps = self._get_gir_targets_deps(girtargets)
    deps += kwargs['dependencies']
    deps += [gir_dep]
    typelib_includes = self._gather_typelib_includes_and_update_depends(state, deps, depends)
    # ldflags will be misinterpreted by gir scanner (showing
    # spurious dependencies) but building GStreamer fails if they
    # are not used here.
    dep_cflags, dep_internal_ldflags, dep_external_ldflags, _, gi_includes = \
        self._get_dependencies_flags(deps, state, depends, use_gir_args=True)
    scan_cflags = []
    scan_cflags += list(self._get_scanner_cflags(cflags))
    scan_cflags += list(self._get_scanner_cflags(dep_cflags))
    scan_cflags += list(self._get_scanner_cflags(self._get_external_args_for_langs(state, [lc[0] for lc in langs_compilers])))
    scan_internal_ldflags = []
    scan_internal_ldflags += list(self._get_scanner_ldflags(internal_ldflags))
    scan_internal_ldflags += list(self._get_scanner_ldflags(dep_internal_ldflags))
    scan_external_ldflags = []
    scan_external_ldflags += list(self._get_scanner_ldflags(external_ldflags))
    scan_external_ldflags += list(self._get_scanner_ldflags(dep_external_ldflags))
    girtargets_inc_dirs = self._get_gir_targets_inc_dirs(girtargets)
    inc_dirs = kwargs['include_directories']

    gir_inc_dirs: T.List[str] = []

    # Assemble the g-ir-scanner command line; the order of these sections matters.
    scan_command: T.List[T.Union[str, build.Executable, 'ExternalProgram', 'OverrideProgram']] = [giscanner]
    scan_command += ['--no-libtool']
    scan_command += ['--namespace=' + ns, '--nsversion=' + nsversion]
    scan_command += ['--warn-all']
    scan_command += ['--output', '@OUTPUT@']
    scan_command += [f'--c-include={h}' for h in kwargs['header']]
    scan_command += kwargs['extra_args']
    scan_command += ['-I' + srcdir, '-I' + builddir]
    scan_command += state.get_include_args(girtargets_inc_dirs)
    scan_command += ['--filelist=' + self._make_gir_filelist(state, srcdir, ns, nsversion, girtargets, libsources)]
    scan_command += mesonlib.listify([self._get_link_args(state, l, depends, use_gir_args=True) for l in kwargs['link_with']])
    _cmd, _ginc, _deps = self._scan_include(state, kwargs['includes'])
    scan_command.extend(_cmd)
    gir_inc_dirs.extend(_ginc)
    depends.extend(_deps)
    scan_command += [f'--symbol-prefix={p}' for p in kwargs['symbol_prefix']]
    scan_command += [f'--identifier-prefix={p}' for p in kwargs['identifier_prefix']]
    scan_command += [f'--pkg-export={p}' for p in kwargs['export_packages']]
    scan_command += ['--cflags-begin']
    scan_command += scan_cflags
    scan_command += ['--cflags-end']
    scan_command += state.get_include_args(inc_dirs)
    scan_command += state.get_include_args(list(gi_includes) + gir_inc_dirs + inc_dirs, prefix='--add-include-path=')
    scan_command += list(scan_internal_ldflags)
    scan_command += self._scan_gir_targets(state, girtargets)
    scan_command += self._scan_langs(state, [lc[0] for lc in langs_compilers])
    scan_command += list(scan_external_ldflags)

    if self._gir_has_option('--sources-top-dirs'):
        scan_command += ['--sources-top-dirs', os.path.join(state.environment.get_source_dir(), self.interpreter.subproject_dir, state.subproject)]
        scan_command += ['--sources-top-dirs', os.path.join(state.environment.get_build_dir(), self.interpreter.subproject_dir, state.subproject)]

    if '--warn-error' in scan_command:
        FeatureDeprecated.single_use('gnome.generate_gir argument --warn-error', '0.55.0',
                                     state.subproject, 'Use "fatal_warnings" keyword argument', state.current_node)
    if kwargs['fatal_warnings']:
        scan_command.append('--warn-error')

    generated_files = [f for f in libsources if isinstance(f, (GeneratedList, CustomTarget, CustomTargetIndex))]

    scan_target = self._make_gir_target(state, girfile, scan_command, generated_files, depends, kwargs)

    # The typelib is compiled from the freshly scanned .gir.
    typelib_output = f'{ns}-{nsversion}.typelib'
    typelib_cmd = [gicompiler, scan_target, '--output', '@OUTPUT@']
    typelib_cmd += state.get_include_args(gir_inc_dirs, prefix='--includedir=')

    for incdir in typelib_includes:
        typelib_cmd += ["--includedir=" + incdir]

    typelib_target = self._make_typelib_target(state, typelib_output, typelib_cmd, generated_files, kwargs)

    self._devenv_prepend('GI_TYPELIB_PATH', os.path.join(state.environment.get_build_dir(), state.subdir))

    rv = [scan_target, typelib_target]

    return ModuleReturnValue(rv, rv)
@noPosargs
@typed_kwargs('gnome.compile_schemas', _BUILD_BY_DEFAULT.evolve(since='0.40.0'), DEPEND_FILES_KW)
def compile_schemas(self, state: 'ModuleState', args: T.List['TYPE_var'], kwargs: 'CompileSchemas') -> ModuleReturnValue:
    """gnome.compile_schemas(): compile GSettings schemas in the current subdir.

    Creates one custom target producing 'gschemas.compiled' and prepends the
    build subdir to GSETTINGS_SCHEMA_DIR in the dev environment.
    """
    srcdir = os.path.join(state.build_to_src, state.subdir)
    outdir = state.subdir

    cmd = [state.find_program('glib-compile-schemas'), '--targetdir', outdir, srcdir]
    ct_kwargs = T.cast(T.Dict[str, T.Any], kwargs.copy())
    ct_kwargs['command'] = cmd
    ct_kwargs['input'] = []
    ct_kwargs['output'] = 'gschemas.compiled'
    if state.subdir == '':
        targetname = 'gsettings-compile'
    else:
        # Make the target name unique per subdir.
        targetname = 'gsettings-compile-' + state.subdir.replace('/', '_')
    target_g = build.CustomTarget(targetname, state.subdir, state.subproject, ct_kwargs)
    self._devenv_prepend('GSETTINGS_SCHEMA_DIR', os.path.join(state.environment.get_build_dir(), state.subdir))
    return ModuleReturnValue(target_g, [target_g])

@FeatureDeprecatedKwargs('gnome.yelp', '0.43.0', ['languages'],
                         'Use a LINGUAS file in the source directory instead')
@typed_pos_args('gnome.yelp', str, varargs=str)
@typed_kwargs(
    'gnome.yelp',
    KwargInfo('languages', ContainerTypeInfo(list, str), listify=True, default=[]),
    KwargInfo('media', ContainerTypeInfo(list, str), listify=True, default=[]),
    KwargInfo('sources', ContainerTypeInfo(list, str), listify=True, default=[]),
    KwargInfo('symlink_media', bool, default=True),
)
def yelp(self, state: 'ModuleState', args: T.Tuple[str, T.List[str]], kwargs: 'Yelp') -> ModuleReturnValue:
    """gnome.yelp(): install yelp help files and create pot/update-po run targets.

    Returns [install script, help-<id>-pot target, help-<id>-update-po target].
    """
    project_id = args[0]
    sources = kwargs['sources']
    if args[1]:
        FeatureDeprecated.single_use('gnome.yelp more than one positional argument', '0.60.0',
                                     state.subproject, 'use the "sources" keyword argument instead.', state.current_node)
    if not sources:
        # Fall back to the deprecated positional sources.
        sources = args[1]
        if not sources:
            raise MesonException('Yelp requires a list of sources')
    elif args[1]:
        mlog.warning('"gnome.yelp" ignores positional sources arguments when the "sources" keyword argument is set')
    # '@@' is the separator understood by the yelphelper script.
    source_str = '@@'.join(sources)

    langs = kwargs['languages']

    script = state.environment.get_build_command()
    inscript_args = ['--internal',
                     'yelphelper',
                     'install',
                     '--subdir=' + state.subdir,
                     '--id=' + project_id,
                     '--installdir=' + os.path.join(state.environment.get_datadir(), 'help'),
                     '--sources=' + source_str]
    if kwargs['symlink_media']:
        inscript_args.append('--symlinks=true')
    if kwargs['media']:
        inscript_args.append('--media=' + '@@'.join(kwargs['media']))
    if langs:
        inscript_args.append('--langs=' + '@@'.join(langs))
    inscript = state.backend.get_executable_serialisation(script + inscript_args)

    potargs = state.environment.get_build_command() + [
        '--internal', 'yelphelper', 'pot',
        '--subdir=' + state.subdir,
        '--id=' + project_id,
        '--sources=' + source_str,
    ]
    pottarget = build.RunTarget('help-' + project_id + '-pot', potargs, [], state.subdir, state.subproject)

    poargs = state.environment.get_build_command() + [
        '--internal', 'yelphelper', 'update-po',
        '--subdir=' + state.subdir,
        '--id=' + project_id,
        '--sources=' + source_str,
        '--langs=' + '@@'.join(langs),
    ]
    potarget = build.RunTarget('help-' + project_id + '-update-po', poargs, [], state.subdir, state.subproject)

    rv: T.List[T.Union[build.ExecutableSerialisation, build.RunTarget]] = [inscript, pottarget, potarget]
    return ModuleReturnValue(None, rv)
@typed_pos_args('gnome.gtkdoc', str)
@typed_kwargs(
    'gnome.gtkdoc',
    KwargInfo('c_args', ContainerTypeInfo(list, str), since='0.48.0', default=[], listify=True),
    KwargInfo('check', bool, default=False, since='0.52.0'),
    KwargInfo('content_files', ContainerTypeInfo(list, (str, mesonlib.File, build.GeneratedList, build.CustomTarget, build.CustomTargetIndex)), default=[], listify=True),
    KwargInfo(
        'dependencies',
        ContainerTypeInfo(list, (Dependency, build.SharedLibrary, build.StaticLibrary)),
        listify=True, default=[]),
    KwargInfo('expand_content_files', ContainerTypeInfo(list, str), default=[], listify=True),
    KwargInfo('fixxref_args', ContainerTypeInfo(list, str), default=[], listify=True),
    KwargInfo('gobject_typesfile', ContainerTypeInfo(list, str), default=[], listify=True),
    KwargInfo('html_args', ContainerTypeInfo(list, str), default=[], listify=True),
    KwargInfo('html_assets', ContainerTypeInfo(list, str), default=[], listify=True),
    KwargInfo('ignore_headers', ContainerTypeInfo(list, str), default=[], listify=True),
    KwargInfo(
        'include_directories',
        ContainerTypeInfo(list, (str, build.IncludeDirs)),
        listify=True, default=[]),
    KwargInfo('install', bool, default=True),
    KwargInfo('install_dir', ContainerTypeInfo(list, str), default=[], listify=True),
    KwargInfo('main_sgml', (str, NoneType)),
    KwargInfo('main_xml', (str, NoneType)),
    KwargInfo('mkdb_args', ContainerTypeInfo(list, str), default=[], listify=True),
    KwargInfo(
        'mode', str, default='auto', since='0.37.0',
        validator=in_set_validator({'xml', 'sgml', 'none', 'auto'})),
    KwargInfo('module_version', str, default='', since='0.48.0'),
    KwargInfo('namespace', str, default='', since='0.37.0'),
    KwargInfo('scan_args', ContainerTypeInfo(list, str), default=[], listify=True),
    KwargInfo('scanobjs_args', ContainerTypeInfo(list, str), default=[], listify=True),
    KwargInfo('src_dir', ContainerTypeInfo(list, (str, build.IncludeDirs)), listify=True, required=True),
)
def gtkdoc(self, state: 'ModuleState', args: T.Tuple[str], kwargs: 'GtkDoc') -> ModuleReturnValue:
    """gnome.gtkdoc(): build gtk-doc API documentation for a module.

    Assembles one custom target that runs meson's internal 'gtkdoc' helper
    (driving gtkdoc-scan/scangobj/mkdb/mkhtml/fixxref), an alias target for
    it, optionally a gtkdoc-check test, and an install script when
    ``install`` is true.  Returns ModuleReturnValue(custom_target, [targets]).
    """
    modulename = args[0]
    main_file = kwargs['main_sgml']
    main_xml = kwargs['main_xml']
    if main_xml is not None:
        if main_file is not None:
            # FIX: error message previously said "main_xgml" (typo for main_sgml).
            raise InvalidArguments('gnome.gtkdoc: main_xml and main_sgml are exclusive arguments')
        main_file = main_xml
    moduleversion = kwargs['module_version']
    targetname = modulename + ('-' + moduleversion if moduleversion else '') + '-doc'
    command = state.environment.get_build_command()

    namespace = kwargs['namespace']

    # Resolve File objects against the build dir, plain strings against the
    # current source subdir.
    def abs_filenames(files: T.Iterable['FileOrString']) -> T.Iterator[str]:
        for f in files:
            if isinstance(f, mesonlib.File):
                yield f.absolute_path(state.environment.get_source_dir(), state.environment.get_build_dir())
            else:
                yield os.path.join(state.environment.get_source_dir(), state.subdir, f)

    src_dirs = kwargs['src_dir']
    header_dirs: T.List[str] = []
    for src_dir in src_dirs:
        if isinstance(src_dir, build.IncludeDirs):
            header_dirs.extend(src_dir.to_string_list(state.environment.get_source_dir(),
                                                      state.environment.get_build_dir()))
        else:
            header_dirs.append(src_dir)

    # '@@' is the list separator understood by the internal gtkdoc helper.
    t_args = ['--internal', 'gtkdoc',
              '--sourcedir=' + state.environment.get_source_dir(),
              '--builddir=' + state.environment.get_build_dir(),
              '--subdir=' + state.subdir,
              '--headerdirs=' + '@@'.join(header_dirs),
              '--mainfile=' + main_file,
              '--modulename=' + modulename,
              '--moduleversion=' + moduleversion,
              '--mode=' + kwargs['mode']]
    for tool in ['scan', 'scangobj', 'mkdb', 'mkhtml', 'fixxref']:
        program_name = 'gtkdoc-' + tool
        program = state.find_program(program_name)
        path = program.get_path()
        t_args.append(f'--{program_name}={path}')
    if namespace:
        t_args.append('--namespace=' + namespace)
    t_args.append(f'--htmlargs={"@@".join(kwargs["html_args"])}')
    t_args.append(f'--scanargs={"@@".join(kwargs["scan_args"])}')
    t_args.append(f'--scanobjsargs={"@@".join(kwargs["scanobjs_args"])}')
    t_args.append(f'--gobjects-types-file={"@@".join(abs_filenames(kwargs["gobject_typesfile"]))}')
    t_args.append(f'--fixxrefargs={"@@".join(kwargs["fixxref_args"])}')
    t_args.append(f'--mkdbargs={"@@".join(kwargs["mkdb_args"])}')
    t_args.append(f'--html-assets={"@@".join(abs_filenames(kwargs["html_assets"]))}')

    depends: T.List['build.GeneratedTypes'] = []
    content_files = []
    for s in kwargs['content_files']:
        if isinstance(s, (build.CustomTarget, build.CustomTargetIndex)):
            depends.append(s)
            for o in s.get_outputs():
                content_files.append(os.path.join(state.environment.get_build_dir(),
                                                  state.backend.get_target_dir(s),
                                                  o))
        elif isinstance(s, mesonlib.File):
            content_files.append(s.absolute_path(state.environment.get_source_dir(),
                                                 state.environment.get_build_dir()))
        elif isinstance(s, build.GeneratedList):
            depends.append(s)
            for gen_src in s.get_outputs():
                content_files.append(os.path.join(state.environment.get_source_dir(),
                                                  state.subdir,
                                                  gen_src))
        else:
            content_files.append(os.path.join(state.environment.get_source_dir(),
                                              state.subdir,
                                              s))
    t_args += ['--content-files=' + '@@'.join(content_files)]

    t_args.append(f'--expand-content-files={"@@".join(abs_filenames(kwargs["expand_content_files"]))}')
    t_args.append(f'--ignore-headers={"@@".join(kwargs["ignore_headers"])}')
    t_args.append(f'--installdir={"@@".join(kwargs["install_dir"])}')
    t_args += self._get_build_args(kwargs['c_args'], kwargs['include_directories'], kwargs['dependencies'], state, depends)
    custom_kwargs = {'output': modulename + '-decl.txt',
                     'command': command + t_args,
                     'depends': depends,
                     # gtk-doc has its own dependency tracking; always re-run.
                     'build_always_stale': True,
                     }
    custom_target = build.CustomTarget(targetname, state.subdir, state.subproject, custom_kwargs)
    alias_target = build.AliasTarget(targetname, [custom_target], state.subdir, state.subproject)
    if kwargs['check']:
        check_cmd = state.find_program('gtkdoc-check')
        check_env = ['DOC_MODULE=' + modulename,
                     'DOC_MAIN_SGML_FILE=' + main_file]
        check_args = (targetname + '-check', check_cmd)
        check_workdir = os.path.join(state.environment.get_build_dir(), state.subdir)
        state.test(check_args, env=check_env, workdir=check_workdir, depends=[custom_target])
    res: T.List[T.Union[build.Target, build.ExecutableSerialisation]] = [custom_target, alias_target]
    if kwargs['install']:
        res.append(state.backend.get_executable_serialisation(command + t_args, tag='doc'))
    return ModuleReturnValue(custom_target, res)

def _get_build_args(self, c_args: T.List[str], inc_dirs: T.List[T.Union[str, build.IncludeDirs]],
                    deps: T.List[T.Union[Dependency, build.SharedLibrary, build.StaticLibrary]],
                    state: 'ModuleState',
                    depends: T.List[build.BuildTarget]) -> T.List[str]:
    """Build the --cc/--ld/--cflags/--ldflags args passed to the gtkdoc helper.

    Combines caller cflags, dependency flags (mutating *depends*), include
    args, environment-provided flags and per-compiler sanitizer flags for C.
    """
    args: T.List[str] = []
    cflags = c_args.copy()
    deps_cflags, internal_ldflags, external_ldflags, *_ = \
        self._get_dependencies_flags(deps, state, depends, include_rpath=True)

    cflags.extend(deps_cflags)
    cflags.extend(state.get_include_args(inc_dirs))
    ldflags: T.List[str] = []
    ldflags.extend(internal_ldflags)
    ldflags.extend(external_ldflags)

    cflags.extend(state.environment.coredata.get_external_args(MachineChoice.HOST, 'c'))
    ldflags.extend(state.environment.coredata.get_external_link_args(MachineChoice.HOST, 'c'))
    compiler = state.environment.coredata.compilers[MachineChoice.HOST]['c']

    compiler_flags = self._get_langs_compilers_flags(state, [('c', compiler)])
    cflags.extend(compiler_flags[0])
    ldflags.extend(compiler_flags[1])
    ldflags.extend(compiler_flags[2])
    if compiler:
        args += ['--cc=%s' % join_args(compiler.get_exelist())]
        args += ['--ld=%s' % join_args(compiler.get_linker_exelist())]
    if cflags:
        args += ['--cflags=%s' % join_args(cflags)]
    if ldflags:
        args += ['--ldflags=%s' % join_args(ldflags)]

    return args
[] cflags = c_args.copy() deps_cflags, internal_ldflags, external_ldflags, *_ = \ self._get_dependencies_flags(deps, state, depends, include_rpath=True) cflags.extend(deps_cflags) cflags.extend(state.get_include_args(inc_dirs)) ldflags: T.List[str] = [] ldflags.extend(internal_ldflags) ldflags.extend(external_ldflags) cflags.extend(state.environment.coredata.get_external_args(MachineChoice.HOST, 'c')) ldflags.extend(state.environment.coredata.get_external_link_args(MachineChoice.HOST, 'c')) compiler = state.environment.coredata.compilers[MachineChoice.HOST]['c'] compiler_flags = self._get_langs_compilers_flags(state, [('c', compiler)]) cflags.extend(compiler_flags[0]) ldflags.extend(compiler_flags[1]) ldflags.extend(compiler_flags[2]) if compiler: args += ['--cc=%s' % join_args(compiler.get_exelist())] args += ['--ld=%s' % join_args(compiler.get_linker_exelist())] if cflags: args += ['--cflags=%s' % join_args(cflags)] if ldflags: args += ['--ldflags=%s' % join_args(ldflags)] return args @noKwargs @typed_pos_args('gnome.gtkdoc_html_dir', str) def gtkdoc_html_dir(self, state: 'ModuleState', args: T.Tuple[str], kwargs: 'TYPE_kwargs') -> str: return os.path.join('share/gtk-doc/html', args[0]) @typed_pos_args('gnome.gdbus_codegen', str, optargs=[str]) @typed_kwargs( 'gnome.gdbus_codegen', _BUILD_BY_DEFAULT.evolve(since='0.40.0'), KwargInfo('sources', ContainerTypeInfo(list, (str, mesonlib.File)), since='0.46.0', default=[], listify=True), KwargInfo('extra_args', ContainerTypeInfo(list, str), since='0.47.0', default=[], listify=True), KwargInfo('interface_prefix', (str, NoneType)), KwargInfo('namespace', (str, NoneType)), KwargInfo('object_manager', bool, default=False), KwargInfo( 'annotations', ContainerTypeInfo(list, str), listify=True, default=[], validator=lambda x: 'must be made up of 3 strings for ELEMENT, KEY, and VALUE' if len(x) != 3 else None ), KwargInfo('install_header', bool, default=False, since='0.46.0'), KwargInfo('install_dir', (str, NoneType), 
since='0.46.0'), KwargInfo('docbook', (str, NoneType)), KwargInfo( 'autocleanup', str, default='default', since='0.47.0', validator=in_set_validator({'all', 'none', 'objects'})), ) def gdbus_codegen(self, state: 'ModuleState', args: T.Tuple[str, T.Optional[str]], kwargs: 'GdbusCodegen') -> ModuleReturnValue: namebase = args[0] xml_files: T.List['FileOrString'] = [args[1]] if args[1] else [] cmd: T.List[T.Union['ExternalProgram', str]] = [state.find_program('gdbus-codegen')] cmd.extend(kwargs['extra_args']) # Autocleanup supported? glib_version = self._get_native_glib_version(state) if not mesonlib.version_compare(glib_version, '>= 2.49.1'): # Warn if requested, silently disable if not if kwargs['autocleanup'] != 'default': mlog.warning(f'Glib version ({glib_version}) is too old to support the \'autocleanup\' ' 'kwarg, need 2.49.1 or newer') else: # Handle legacy glib versions that don't have autocleanup ac = kwargs['autocleanup'] if ac == 'default': ac = 'all' cmd.extend(['--c-generate-autocleanup', ac]) if kwargs['interface_prefix'] is not None: cmd.extend(['--interface-prefix', kwargs['interface_prefix']]) if kwargs['namespace'] is not None: cmd.extend(['--c-namespace', kwargs['namespace']]) if kwargs['object_manager']: cmd.extend(['--c-generate-object-manager']) xml_files.extend(kwargs['sources']) build_by_default = kwargs['build_by_default'] # Annotations are a bit ugly in that they are a list of lists of strings... 
if kwargs['annotations']: cmd.append('--annotate') cmd.extend(kwargs['annotations']) targets = [] install_header = kwargs['install_header'] install_dir = kwargs['install_dir'] or state.environment.coredata.get_option(mesonlib.OptionKey('includedir')) assert isinstance(install_dir, str), 'for mypy' output = namebase + '.c' # Added in https://gitlab.gnome.org/GNOME/glib/commit/e4d68c7b3e8b01ab1a4231bf6da21d045cb5a816 (2.55.2) # Fixed in https://gitlab.gnome.org/GNOME/glib/commit/cd1f82d8fc741a2203582c12cc21b4dacf7e1872 (2.56.2) if mesonlib.version_compare(glib_version, '>= 2.56.2'): custom_kwargs = {'input': xml_files, 'output': output, 'command': cmd + ['--body', '--output', '@OUTPUT@', '@INPUT@'], 'build_by_default': build_by_default } else: if kwargs['docbook'] is not None: docbook = kwargs['docbook'] cmd += ['--generate-docbook', docbook] # https://git.gnome.org/browse/glib/commit/?id=ee09bb704fe9ccb24d92dd86696a0e6bb8f0dc1a if mesonlib.version_compare(glib_version, '>= 2.51.3'): cmd += ['--output-directory', '@OUTDIR@', '--generate-c-code', namebase, '@INPUT@'] else: self._print_gdbus_warning() cmd += ['--generate-c-code', '@OUTDIR@/' + namebase, '@INPUT@'] custom_kwargs = {'input': xml_files, 'output': output, 'command': cmd, 'build_by_default': build_by_default } cfile_custom_target = build.CustomTarget(output, state.subdir, state.subproject, custom_kwargs) targets.append(cfile_custom_target) output = namebase + '.h' if mesonlib.version_compare(glib_version, '>= 2.56.2'): custom_kwargs = {'input': xml_files, 'output': output, 'command': cmd + ['--header', '--output', '@OUTPUT@', '@INPUT@'], 'build_by_default': build_by_default, 'install': install_header, 'install_dir': install_dir } else: custom_kwargs = {'input': xml_files, 'output': output, 'command': cmd, 'build_by_default': build_by_default, 'install': install_header, 'install_dir': install_dir, 'depends': cfile_custom_target } hfile_custom_target = build.CustomTarget(output, state.subdir, 
state.subproject, custom_kwargs) targets.append(hfile_custom_target) if kwargs['docbook'] is not None: docbook = kwargs['docbook'] if not isinstance(docbook, str): raise MesonException('docbook value must be a string.') docbook_cmd = cmd + ['--output-directory', '@OUTDIR@', '--generate-docbook', docbook, '@INPUT@'] # The docbook output is always ${docbook}-${name_of_xml_file} output = namebase + '-docbook' outputs = [] for f in xml_files: outputs.append('{}-{}'.format(docbook, os.path.basename(str(f)))) if mesonlib.version_compare(glib_version, '>= 2.56.2'): custom_kwargs = {'input': xml_files, 'output': outputs, 'command': docbook_cmd, 'build_by_default': build_by_default } else: custom_kwargs = {'input': xml_files, 'output': outputs, 'command': cmd, 'build_by_default': build_by_default, 'depends': cfile_custom_target } docbook_custom_target = build.CustomTarget(output, state.subdir, state.subproject, custom_kwargs) targets.append(docbook_custom_target) return ModuleReturnValue(targets, targets) @permittedKwargs({'sources', 'c_template', 'h_template', 'install_header', 'install_dir', 'comments', 'identifier_prefix', 'symbol_prefix', 'eprod', 'vprod', 'fhead', 'fprod', 'ftail', 'vhead', 'vtail', 'depends'}) @typed_pos_args('gnome.mkenums', str) def mkenums(self, state: 'ModuleState', args: T.Tuple[str], kwargs) -> ModuleReturnValue: basename = args[0] if 'sources' not in kwargs: raise MesonException('Missing keyword argument "sources".') sources = kwargs.pop('sources') if isinstance(sources, str): sources = [sources] elif not isinstance(sources, list): raise MesonException( 'Sources keyword argument must be a string or array.') cmd = [] known_kwargs = ['comments', 'eprod', 'fhead', 'fprod', 'ftail', 'identifier_prefix', 'symbol_prefix', 'template', 'vhead', 'vprod', 'vtail'] known_custom_target_kwargs = ['install_dir', 'build_always', 'depends', 'depend_files'] c_template = h_template = None install_header = False for arg, value in kwargs.items(): if arg == 
'sources': raise AssertionError("sources should've already been handled") elif arg == 'c_template': c_template = value if isinstance(c_template, mesonlib.File): c_template = c_template.absolute_path(state.environment.source_dir, state.environment.build_dir) if 'template' in kwargs: raise MesonException('Mkenums does not accept both ' 'c_template and template keyword ' 'arguments at the same time.') elif arg == 'h_template': h_template = value if isinstance(h_template, mesonlib.File): h_template = h_template.absolute_path(state.environment.source_dir, state.environment.build_dir) if 'template' in kwargs: raise MesonException('Mkenums does not accept both ' 'h_template and template keyword ' 'arguments at the same time.') elif arg == 'install_header': install_header = value elif arg in known_kwargs: cmd += ['--' + arg.replace('_', '-'), value] elif arg not in known_custom_target_kwargs: raise MesonException( f'Mkenums does not take a {arg} keyword argument.') cmd = [state.find_program(['glib-mkenums', 'mkenums'])] + cmd custom_kwargs = {} for arg in known_custom_target_kwargs: if arg in kwargs: custom_kwargs[arg] = kwargs[arg] targets = [] if h_template is not None: h_output = os.path.basename(os.path.splitext(h_template)[0]) # We always set template as the first element in the source array # so --template consumes it. h_cmd = cmd + ['--template', '@INPUT@'] h_sources = [h_template] + sources # Copy so we don't mutate the arguments for the c_template h_kwargs = custom_kwargs.copy() h_kwargs['install'] = install_header if 'install_dir' not in h_kwargs: h_kwargs['install_dir'] = \ state.environment.coredata.get_option(mesonlib.OptionKey('includedir')) h_target = self._make_mkenum_custom_target(state, h_sources, h_output, h_cmd, h_kwargs) targets.append(h_target) if c_template is not None: c_output = os.path.basename(os.path.splitext(c_template)[0]) # We always set template as the first element in the source array # so --template consumes it. 
c_cmd = cmd + ['--template', '@INPUT@'] c_sources = [c_template] + sources c_kwargs = custom_kwargs.copy() # Never install the C file. Complain on bug tracker if you need it. c_kwargs['install'] = False c_kwargs['install_dir'] = [] if h_template is not None: if 'depends' in custom_kwargs: c_kwargs['depends'] += [h_target] else: c_kwargs['depends'] = h_target c_target = self._make_mkenum_custom_target(state, c_sources, c_output, c_cmd, c_kwargs) targets.insert(0, c_target) if c_template is None and h_template is None: generic_cmd = cmd + ['@INPUT@'] custom_kwargs['install'] = install_header if 'install_dir' not in custom_kwargs: custom_kwargs['install_dir'] = \ state.environment.coredata.get_option(mesonlib.OptionKey('includedir')) target = self._make_mkenum_custom_target(state, sources, basename, generic_cmd, custom_kwargs) return ModuleReturnValue(target, [target]) elif len(targets) == 1: return ModuleReturnValue(targets[0], [targets[0]]) else: return ModuleReturnValue(targets, targets) @FeatureNew('gnome.mkenums_simple', '0.42.0') @typed_pos_args('gnome.mkenums_simple', str) def mkenums_simple(self, state: 'ModuleState', args: T.Tuple[str], kwargs) -> ModuleReturnValue: hdr_filename = f'{args[0]}.h' body_filename = f'{args[0]}.c' # not really needed, just for sanity checking forbidden_kwargs = ['c_template', 'h_template', 'eprod', 'fhead', 'fprod', 'ftail', 'vhead', 'vtail', 'comments'] for arg in forbidden_kwargs: if arg in kwargs: raise MesonException(f'mkenums_simple() does not take a {arg} keyword argument') # kwargs to pass as-is from mkenums_simple() to mkenums() shared_kwargs = ['sources', 'install_header', 'install_dir', 'identifier_prefix', 'symbol_prefix'] mkenums_kwargs = {} for arg in shared_kwargs: if arg in kwargs: mkenums_kwargs[arg] = kwargs[arg] # .c file generation c_file_kwargs = copy.deepcopy(mkenums_kwargs) if 'sources' not in kwargs: raise MesonException('Missing keyword argument "sources".') sources = kwargs['sources'] if 
isinstance(sources, str): sources = [sources] elif not isinstance(sources, list): raise MesonException( 'Sources keyword argument must be a string or array.') # The `install_header` argument will be used by mkenums() when # not using template files, so we need to forcibly unset it # when generating the C source file, otherwise we will end up # installing it c_file_kwargs['install_header'] = False header_prefix = kwargs.get('header_prefix', '') decl_decorator = kwargs.get('decorator', '') func_prefix = kwargs.get('function_prefix', '') body_prefix = kwargs.get('body_prefix', '') # Maybe we should write our own template files into the build dir # instead, but that seems like much more work, nice as it would be. fhead = '' if body_prefix != '': fhead += '%s\n' % body_prefix fhead += '#include "%s"\n' % hdr_filename for hdr in sources: fhead += '#include "{}"\n'.format(os.path.basename(str(hdr))) fhead += textwrap.dedent( ''' #define C_ENUM(v) ((gint) v) #define C_FLAGS(v) ((guint) v) ''') c_file_kwargs['fhead'] = fhead c_file_kwargs['fprod'] = textwrap.dedent( ''' /* enumerations from "@basename@" */ ''') c_file_kwargs['vhead'] = textwrap.dedent( f''' GType {func_prefix}@enum_name@_get_type (void) {{ static gsize gtype_id = 0; static const G@Type@Value values[] = {{''') c_file_kwargs['vprod'] = ' { C_@TYPE@(@VALUENAME@), "@VALUENAME@", "@valuenick@" },' c_file_kwargs['vtail'] = textwrap.dedent( ''' { 0, NULL, NULL } }; if (g_once_init_enter (&gtype_id)) { GType new_type = g_@type@_register_static (g_intern_static_string ("@EnumName@"), values); g_once_init_leave (&gtype_id, new_type); } return (GType) gtype_id; }''') rv = self.mkenums(state, [body_filename], c_file_kwargs) c_file = rv.return_value # .h file generation h_file_kwargs = copy.deepcopy(mkenums_kwargs) h_file_kwargs['fhead'] = textwrap.dedent( f'''#pragma once #include <glib-object.h> {header_prefix} G_BEGIN_DECLS ''') h_file_kwargs['fprod'] = textwrap.dedent( ''' /* enumerations from "@basename@" */ ''') 
h_file_kwargs['vhead'] = textwrap.dedent( f''' {decl_decorator} GType {func_prefix}@enum_name@_get_type (void); #define @ENUMPREFIX@_TYPE_@ENUMSHORT@ ({func_prefix}@enum_name@_get_type())''') h_file_kwargs['ftail'] = textwrap.dedent( ''' G_END_DECLS''') rv = self.mkenums(state, [hdr_filename], h_file_kwargs) h_file = rv.return_value return ModuleReturnValue([c_file, h_file], [c_file, h_file]) @staticmethod def _make_mkenum_custom_target( state: 'ModuleState', sources: T.Sequence[T.Union[str, mesonlib.File, build.CustomTarget, build.CustomTargetIndex, build.GeneratedList]], output: str, cmd: T.List[str], kwargs: T.Dict[str, T.Any]) -> build.CustomTarget: custom_kwargs = { 'input': sources, 'output': [output], 'capture': True, 'command': cmd } custom_kwargs.update(kwargs) return build.CustomTarget(output, state.subdir, state.subproject, custom_kwargs, # https://github.com/mesonbuild/meson/issues/973 absolute_paths=True) @permittedKwargs({'sources', 'prefix', 'install_header', 'install_dir', 'stdinc', 'nostdinc', 'internal', 'skip_source', 'valist_marshallers', 'extra_args'}) @typed_pos_args('gnome.genmarshal', str) def genmarshal(self, state: 'ModuleState', args: T.Tuple[str], kwargs) -> ModuleReturnValue: output = args[0] if 'sources' not in kwargs: raise MesonException('Missing keyword argument "sources".') sources = kwargs.pop('sources') if isinstance(sources, str): sources = [sources] elif not isinstance(sources, list): raise MesonException( 'Sources keyword argument must be a string or array.') new_genmarshal = mesonlib.version_compare(self._get_native_glib_version(state), '>= 2.53.3') cmd = [state.find_program('glib-genmarshal')] known_kwargs = ['internal', 'nostdinc', 'skip_source', 'stdinc', 'valist_marshallers', 'extra_args'] known_custom_target_kwargs = ['build_always', 'depends', 'depend_files', 'install_dir', 'install_header'] for arg, value in kwargs.items(): if arg == 'prefix': cmd += ['--prefix', value] elif arg == 'extra_args': if new_genmarshal: cmd 
+= mesonlib.stringlistify(value) else: mlog.warning('The current version of GLib does not support extra arguments \n' 'for glib-genmarshal. You need at least GLib 2.53.3. See ', mlog.bold('https://github.com/mesonbuild/meson/pull/2049')) elif arg in known_kwargs and value: cmd += ['--' + arg.replace('_', '-')] elif arg not in known_custom_target_kwargs: raise MesonException(f'Genmarshal does not take a {arg} keyword argument.') install_header = kwargs.pop('install_header', False) install_dir = kwargs.pop('install_dir', []) custom_kwargs = { 'input': sources, } # https://github.com/GNOME/glib/commit/0fbc98097fac4d3e647684f344e508abae109fdf if mesonlib.version_compare(self._get_native_glib_version(state), '>= 2.51.0'): cmd += ['--output', '@OUTPUT@'] else: custom_kwargs['capture'] = True for arg in known_custom_target_kwargs: if arg in kwargs: custom_kwargs[arg] = kwargs[arg] header_file = output + '.h' custom_kwargs['command'] = cmd + ['--body', '@INPUT@'] if mesonlib.version_compare(self._get_native_glib_version(state), '>= 2.53.4'): # Silence any warnings about missing prototypes custom_kwargs['command'] += ['--include-header', header_file] custom_kwargs['output'] = output + '.c' body = build.CustomTarget(output + '_c', state.subdir, state.subproject, custom_kwargs) custom_kwargs['install'] = install_header custom_kwargs['install_dir'] = install_dir if new_genmarshal: cmd += ['--pragma-once'] custom_kwargs['command'] = cmd + ['--header', '@INPUT@'] custom_kwargs['output'] = header_file header = build.CustomTarget(output + '_h', state.subdir, state.subproject, custom_kwargs) rv = [body, header] return ModuleReturnValue(rv, rv) @staticmethod def _vapi_args_to_command(prefix: str, variable: str, kwargs: T.Dict[str, T.Any], accept_vapi: bool = False) -> T.List[str]: arg_list = mesonlib.extract_as_list(kwargs, variable) ret: T.List[str] = [] for arg in arg_list: if not isinstance(arg, str): types = 'strings' + ' or InternalDependencys' if accept_vapi else '' raise 
MesonException(f'All {variable} must be {types}') ret.append(prefix + arg) return ret def _extract_vapi_packages(self, state: 'ModuleState', kwargs: T.Dict[str, T.Any] ) -> T.Tuple[T.List[str], T.List[build.Target], T.List[str], T.List[str]]: ''' Packages are special because we need to: - Get a list of packages for the .deps file - Get a list of depends for any VapiTargets - Get package name from VapiTargets - Add include dirs for any VapiTargets ''' arg_list = kwargs.get('packages') if not arg_list: return [], [], [], [] arg_list = mesonlib.listify(arg_list) vapi_depends: T.List[build.Target] = [] vapi_packages: T.List[str] = [] vapi_includes: T.List[str] = [] ret: T.List[str] = [] remaining_args = [] for arg in arg_list: if isinstance(arg, InternalDependency): targets = [t for t in arg.sources if isinstance(t, VapiTarget)] for target in targets: srcdir = os.path.join(state.environment.get_source_dir(), target.get_subdir()) outdir = os.path.join(state.environment.get_build_dir(), target.get_subdir()) outfile = target.get_outputs()[0][:-5] # Strip .vapi ret.append('--vapidir=' + outdir) ret.append('--girdir=' + outdir) ret.append('--pkg=' + outfile) vapi_depends.append(target) vapi_packages.append(outfile) vapi_includes.append(srcdir) else: assert isinstance(arg, str), 'for mypy' vapi_packages.append(arg) remaining_args.append(arg) kwargs['packages'] = remaining_args vapi_args = ret + self._vapi_args_to_command('--pkg=', 'packages', kwargs, accept_vapi=True) return vapi_args, vapi_depends, vapi_packages, vapi_includes def _generate_deps(self, state: 'ModuleState', library: str, packages: T.List[str], install_dir: str) -> build.Data: outdir = state.environment.scratch_dir fname = os.path.join(outdir, library + '.deps') with open(fname, 'w', encoding='utf-8') as ofile: for package in packages: ofile.write(package + '\n') return build.Data([mesonlib.File(True, outdir, fname)], install_dir, install_dir, mesonlib.FileMode(), state.subproject) def 
_get_vapi_link_with(self, target: build.CustomTarget) -> T.List[T.Union[build.BuildTarget, build.CustomTarget]]: link_with: T.List[T.Union[build.BuildTarget, build.CustomTarget]] = [] for dep in target.get_target_dependencies(): if isinstance(dep, build.SharedLibrary): link_with.append(dep) elif isinstance(dep, GirTarget): link_with += self._get_vapi_link_with(dep) return link_with @permittedKwargs({'sources', 'packages', 'metadata_dirs', 'gir_dirs', 'vapi_dirs', 'install', 'install_dir'}) @typed_pos_args('gnome.generate_vapi', str) def generate_vapi(self, state: 'ModuleState', args: T.Tuple[str], kwargs) -> ModuleReturnValue: created_values = [] library = args[0] build_dir = os.path.join(state.environment.get_build_dir(), state.subdir) source_dir = os.path.join(state.environment.get_source_dir(), state.subdir) pkg_cmd, vapi_depends, vapi_packages, vapi_includes = self._extract_vapi_packages(state, kwargs) cmd: T.List[T.Union[str, 'ExternalProgram']] cmd = [state.find_program('vapigen')] cmd += ['--quiet', '--library=' + library, '--directory=' + build_dir] cmd += self._vapi_args_to_command('--vapidir=', 'vapi_dirs', kwargs) cmd += self._vapi_args_to_command('--metadatadir=', 'metadata_dirs', kwargs) cmd += self._vapi_args_to_command('--girdir=', 'gir_dirs', kwargs) cmd += pkg_cmd cmd += ['--metadatadir=' + source_dir] if 'sources' not in kwargs: raise MesonException('sources are required to generate the vapi file') inputs = mesonlib.extract_as_list(kwargs, 'sources') link_with = [] for i in inputs: if isinstance(i, str): cmd.append(os.path.join(source_dir, i)) elif isinstance(i, GirTarget): link_with += self._get_vapi_link_with(i) subdir = os.path.join(state.environment.get_build_dir(), i.get_subdir()) gir_file = os.path.join(subdir, i.get_outputs()[0]) cmd.append(gir_file) else: raise MesonException('Input must be a str or GirTarget') vapi_output = library + '.vapi' custom_kwargs = { 'command': cmd, 'input': inputs, 'output': vapi_output, 'depends': vapi_depends, 
} install_dir = kwargs.get('install_dir', os.path.join(state.environment.coredata.get_option(mesonlib.OptionKey('datadir')), 'vala', 'vapi')) if kwargs.get('install'): custom_kwargs['install'] = kwargs['install'] custom_kwargs['install_dir'] = install_dir # We shouldn't need this locally but we install it deps_target = self._generate_deps(state, library, vapi_packages, install_dir) created_values.append(deps_target) vapi_target = VapiTarget(vapi_output, state.subdir, state.subproject, custom_kwargs) # So to try our best to get this to just work we need: # - link with with the correct library # - include the vapi and dependent vapi files in sources # - add relevant directories to include dirs incs = [build.IncludeDirs(state.subdir, ['.'] + vapi_includes, False)] sources = [vapi_target] + vapi_depends rv = InternalDependency(None, incs, [], [], link_with, [], sources, [], {}) created_values.append(rv) return ModuleReturnValue(rv, created_values) def initialize(interp: 'Interpreter') -> GnomeModule: mod = GnomeModule(interp) mod.interpreter.append_holder_map(GResourceTarget, interpreter.CustomTargetHolder) mod.interpreter.append_holder_map(GResourceHeaderTarget, interpreter.CustomTargetHolder) mod.interpreter.append_holder_map(GirTarget, interpreter.CustomTargetHolder) mod.interpreter.append_holder_map(TypelibTarget, interpreter.CustomTargetHolder) mod.interpreter.append_holder_map(VapiTarget, interpreter.CustomTargetHolder) return mod
#!/usr/bin/python3 # ######################################################################## # Copyright (c) 2021 Advanced Micro Devices, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # ######################################################################## # # Author: rocsparse-maintainer@amd.com # """Copyright 2021 Advanced Micro Devices, Inc. 
Manage build and installation""" import re import sys import os import platform import subprocess import argparse import pathlib from fnmatch import fnmatchcase args = {} param = {} OS_info = {} def parse_args(): """Parse command-line arguments""" parser = argparse.ArgumentParser(description="""Checks build arguments""") # # Common options # # Debug parser.add_argument('-g', '--debug', required=False, default = False, action='store_true', help='Generate Debug build (optional, default: False, Release is default mode)') # Build directory parser.add_argument( '--build_dir', type=str, required=False, default = "build", help='Build directory path (optional, default: build)') # ? parser.add_argument( '--skip_ld_conf_entry', required=False, default = False) # Static library parser.add_argument( '--static', required=False, default = False, dest='static_lib', action='store_true', help='Generate static library build (optional, default: False)') # Build client parser.add_argument('-c', '--clients', required=False, default = False, dest='build_clients', action='store_true', help='Generate all client builds (optional, default: False)') # Install after build. parser.add_argument('-i', '--install', required=False, default = False, dest='install', action='store_true', help='Install after build (optional, default: False)') # Add definition parser.add_argument( '--cmake-darg', required=False, dest='cmake_dargs', action='append', default=[], help='List of additional cmake defines for builds (optional, e.g. 
CMAKE)') # Verbose mode parser.add_argument('-v', '--verbose', required=False, default = False, action='store_true', help='Verbose build (optional, default: False)') # rocsparse parser.add_argument( '--clients-only', dest='clients_only', required=False, default = False, action='store_true', help='Build only clients with a pre-built library') parser.add_argument( '--rocprim_dir', dest='rocprim_dir', type=str, required=False, default = "", help='Specify path to an existing rocPRIM install directory (optional, default: /opt/rocm/rocprim)') return parser.parse_args() def os_detect(): global OS_info if os.name == "nt": OS_info["ID"] = platform.system() else: inf_file = "/etc/os-release" if os.path.exists(inf_file): with open(inf_file) as f: for line in f: if "=" in line: k,v = line.strip().split("=") OS_info[k] = v.replace('"','') OS_info["NUM_PROC"] = os.cpu_count() print(OS_info) def create_dir(dir_path): full_path = "" if os.path.isabs(dir_path): full_path = dir_path else: full_path = os.path.join( os.getcwd(), dir_path ) pathlib.Path(full_path).mkdir(parents=True, exist_ok=True) return def delete_dir(dir_path) : if (not os.path.exists(dir_path)): return if os.name == "nt": run_cmd( "RMDIR" , f"/S /Q {dir_path}") else: run_cmd( "rm" , f"-rf {dir_path}") def cmake_path(os_path): if os.name == "nt": return os_path.replace("\\", "/") else: return os_path def config_cmd(): global args global OS_info cwd_path = os.getcwd() cmake_executable = "" cmake_options = [] src_path = cmake_path(cwd_path) cmake_platform_opts = [] if os.name == "nt": # not really rocm path as none exist, HIP_DIR set in toolchain is more important rocm_path = os.getenv( 'ROCM_CMAKE_PATH', "C:/github/rocm-cmake-master/share/rocm") cmake_executable = "cmake" #set CPACK_PACKAGING_INSTALL_PREFIX= defined as blank as it is appended to end of path for archive creation cmake_platform_opts.append( f"-DCPACK_PACKAGING_INSTALL_PREFIX=" ) cmake_platform_opts.append( f"-DCMAKE_INSTALL_PREFIX=\"C:/hipSDK\"" ) 
generator = f"-G Ninja" cmake_options.append( generator ) toolchain = os.path.join( src_path, "toolchain-windows.cmake" ) else: rocm_path = os.getenv( 'ROCM_PATH', "/opt/rocm") cmake_executable = "cmake" cmake_platform_opts.append( f"-DROCM_DIR:PATH={rocm_path} -DCPACK_PACKAGING_INSTALL_PREFIX={rocm_path}" ) cmake_platform_opts.append( f"-DCMAKE_INSTALL_PREFIX=\"rocsparse-install\"" ) toolchain = "toolchain-linux.cmake" print( f"Build source path: {src_path}") tools = f"-DCMAKE_TOOLCHAIN_FILE={toolchain}" cmake_options.append( tools ) cmake_options.extend( cmake_platform_opts ) cmake_base_options = f"-DROCM_PATH={rocm_path} -DCMAKE_PREFIX_PATH:PATH={rocm_path}" cmake_options.append( cmake_base_options ) # packaging options cmake_pack_options = f"-DCPACK_SET_DESTDIR=OFF" cmake_options.append( cmake_pack_options ) if os.getenv('CMAKE_CXX_COMPILER_LAUNCHER'): cmake_options.append( f"-DCMAKE_CXX_COMPILER_LAUNCHER={os.getenv("CMAKE_CXX_COMPILER_LAUNCHER")}" ) # cmake_options.append("-DBUILD_TESTING=OFF") print( cmake_options ) # build type cmake_config = "" build_dir = os.path.abspath(args.build_dir) if not args.debug: build_path = os.path.join(build_dir, "release") cmake_config="Release" else: build_path = os.path.join(build_dir, "debug") cmake_config="Debug" cmake_options.append( f"-DCMAKE_BUILD_TYPE={cmake_config}" ) # clean delete_dir( build_path ) create_dir( os.path.join(build_path, "clients") ) os.chdir( build_path ) if args.static_lib: cmake_options.append( f"-DBUILD_SHARED_LIBS=OFF" ) if args.skip_ld_conf_entry: cmake_options.append( f"-DROCM_DISABLE_LDCONFIG=ON" ) if args.build_clients: cmake_build_dir = cmake_path(build_dir) cmake_options.append( f"-DBUILD_CLIENTS_TESTS=ON -DBUILD_CLIENTS_BENCHMARKS=ON -DBUILD_CLIENTS_SAMPLES=ON -DBUILD_DIR={cmake_build_dir}" ) # if args.clients_only: # if args.library_dir_installed: # library_dir = args.library_dir_installed # else: # library_dir = f"{rocm_path}/rocblas" # cmake_lib_dir = cmake_path(library_dir) # 
cmake_options.append( f"-DSKIP_LIBRARY=ON -DROCBLAS_LIBRARY_DIR={cmake_lib_dir}" ) # Reject # if args.cpu_ref_lib == 'blis': # cmake_options.append( f"-DLINK_BLIS=ON" ) # # Reject for now # # cmake_options.append( f"-DAMDGPU_TARGETS={args.gpu_architecture}" ) if args.cmake_dargs: for i in args.cmake_dargs: cmake_options.append( f"-D{i}" ) cmake_options.append( f"{src_path}") cmd_opts = " ".join(cmake_options) return cmake_executable, cmd_opts def make_cmd(): global args global OS_info make_options = [] nproc = OS_info["NUM_PROC"] if os.name == "nt": make_executable = f"cmake.exe --build . " # ninja if args.verbose: make_options.append( "--verbose" ) make_options.append( "--target all" ) if args.install: make_options.append( "--target package --target install" ) else: make_executable = f"make -j{nproc}" if args.verbose: make_options.append( "VERBOSE=1" ) if True: # args.install: make_options.append( "install" ) cmd_opts = " ".join(make_options) return make_executable, cmd_opts def run_cmd(exe, opts): program = f"{exe} {opts}" print(program) proc = subprocess.run(program, check=True, stderr=subprocess.STDOUT, shell=True) return proc.returncode def main(): global args os_detect() args = parse_args() # configure exe, opts = config_cmd() run_cmd(exe, opts) # make exe, opts = make_cmd() run_cmd(exe, opts) if __name__ == '__main__': main()
#!/usr/bin/python3
# ########################################################################
# Copyright (c) 2021 Advanced Micro Devices, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ########################################################################
#
# Author: rocsparse-maintainer@amd.com
#

"""Copyright 2021 Advanced Micro Devices, Inc.

Manage build and installation"""

import re
import sys
import os
import platform
import subprocess
import argparse
import pathlib
from fnmatch import fnmatchcase

# Module-level state shared between the configure/build steps.
args = {}      # replaced by the argparse.Namespace in main()
param = {}
OS_info = {}   # host info: "ID" (system/distro) and "NUM_PROC" (CPU count)


def parse_args():
    """Parse command-line arguments and return the argparse.Namespace."""
    parser = argparse.ArgumentParser(description="""Checks build arguments""")

    # Debug
    parser.add_argument('-g', '--debug', required=False, default=False,
                        action='store_true',
                        help='Generate Debug build (optional, default: False, Release is default mode)')
    # Build directory
    parser.add_argument('--build_dir', type=str, required=False, default="build",
                        help='Build directory path (optional, default: build)')
    # BUGFIX: this option had no action, so the bare flag raised
    # "expected one argument".  It is a boolean switch like the others.
    parser.add_argument('--skip_ld_conf_entry', required=False, default=False,
                        action='store_true')
    # Static library
    parser.add_argument('--static', required=False, default=False,
                        dest='static_lib', action='store_true',
                        help='Generate static library build (optional, default: False)')
    # Build clients
    parser.add_argument('-c', '--clients', required=False, default=False,
                        dest='build_clients', action='store_true',
                        help='Generate all client builds (optional, default: False)')
    # Install after build.
    parser.add_argument('-i', '--install', required=False, default=False,
                        dest='install', action='store_true',
                        help='Install after build (optional, default: False)')
    # Additional -D cmake definitions, may be given multiple times.
    parser.add_argument('--cmake-darg', required=False, dest='cmake_dargs',
                        action='append', default=[],
                        help='List of additional cmake defines for builds (optional, e.g. CMAKE)')
    # Verbose mode
    parser.add_argument('-v', '--verbose', required=False, default=False,
                        action='store_true',
                        help='Verbose build (optional, default: False)')
    # rocsparse
    parser.add_argument('--clients-only', dest='clients_only', required=False,
                        default=False, action='store_true',
                        help='Build only clients with a pre-built library')
    parser.add_argument('--rocprim_dir', dest='rocprim_dir', type=str,
                        required=False, default="",
                        help='Specify path to an existing rocPRIM install directory (optional, default: /opt/rocm/rocprim)')

    return parser.parse_args()


def os_detect():
    """Populate the global OS_info dict with host system information."""
    global OS_info
    if os.name == "nt":
        OS_info["ID"] = platform.system()
    else:
        inf_file = "/etc/os-release"
        if os.path.exists(inf_file):
            with open(inf_file) as f:
                for line in f:
                    if "=" in line:
                        # BUGFIX: split at the first '=' only; os-release
                        # values may themselves contain '=' characters.
                        k, v = line.strip().split("=", 1)
                        OS_info[k] = v.replace('"', '')
    OS_info["NUM_PROC"] = os.cpu_count()
    print(OS_info)


def create_dir(dir_path):
    """Create dir_path (and parents) relative to the cwd if not absolute."""
    if os.path.isabs(dir_path):
        full_path = dir_path
    else:
        full_path = os.path.join(os.getcwd(), dir_path)
    pathlib.Path(full_path).mkdir(parents=True, exist_ok=True)
    return


def delete_dir(dir_path):
    """Recursively remove dir_path; a no-op when it does not exist."""
    if not os.path.exists(dir_path):
        return
    if os.name == "nt":
        run_cmd("RMDIR", f"/S /Q {dir_path}")
    else:
        run_cmd("rm", f"-rf {dir_path}")


def cmake_path(os_path):
    """Return os_path with forward slashes, as cmake expects, on Windows."""
    if os.name == "nt":
        return os_path.replace("\\", "/")
    return os_path


def config_cmd():
    """Build the cmake configure command.

    Returns:
        (cmake_executable, cmd_opts): the executable name and the complete
        option string (toolchain, platform options, build type, source path).

    Side effects: deletes and recreates the build directory and chdirs
    into it.
    """
    global args
    global OS_info
    cwd_path = os.getcwd()
    cmake_executable = ""
    cmake_options = []
    src_path = cmake_path(cwd_path)
    cmake_platform_opts = []
    if os.name == "nt":
        # not really rocm path as none exist, HIP_DIR set in toolchain is more important
        rocm_path = os.getenv('ROCM_CMAKE_PATH', "C:/github/rocm-cmake-master/share/rocm")
        cmake_executable = "cmake"
        # set CPACK_PACKAGING_INSTALL_PREFIX= defined as blank as it is
        # appended to end of path for archive creation
        cmake_platform_opts.append(f"-DCPACK_PACKAGING_INSTALL_PREFIX=")
        cmake_platform_opts.append(f"-DCMAKE_INSTALL_PREFIX=\"C:/hipSDK\"")
        generator = f"-G Ninja"
        cmake_options.append(generator)
        toolchain = os.path.join(src_path, "toolchain-windows.cmake")
    else:
        rocm_path = os.getenv('ROCM_PATH', "/opt/rocm")
        cmake_executable = "cmake"
        cmake_platform_opts.append(f"-DROCM_DIR:PATH={rocm_path} -DCPACK_PACKAGING_INSTALL_PREFIX={rocm_path}")
        cmake_platform_opts.append(f"-DCMAKE_INSTALL_PREFIX=\"rocsparse-install\"")
        toolchain = "toolchain-linux.cmake"

    print(f"Build source path: {src_path}")

    tools = f"-DCMAKE_TOOLCHAIN_FILE={toolchain}"
    cmake_options.append(tools)

    cmake_options.extend(cmake_platform_opts)

    cmake_base_options = f"-DROCM_PATH={rocm_path} -DCMAKE_PREFIX_PATH:PATH={rocm_path}"
    cmake_options.append(cmake_base_options)

    # packaging options
    cmake_pack_options = f"-DCPACK_SET_DESTDIR=OFF"
    cmake_options.append(cmake_pack_options)

    if os.getenv('CMAKE_CXX_COMPILER_LAUNCHER'):
        cmake_options.append(f"-DCMAKE_CXX_COMPILER_LAUNCHER={os.getenv('CMAKE_CXX_COMPILER_LAUNCHER')}")

    print(cmake_options)

    # build type
    cmake_config = ""
    build_dir = os.path.abspath(args.build_dir)
    if not args.debug:
        build_path = os.path.join(build_dir, "release")
        cmake_config = "Release"
    else:
        build_path = os.path.join(build_dir, "debug")
        cmake_config = "Debug"

    cmake_options.append(f"-DCMAKE_BUILD_TYPE={cmake_config}")

    # clean build tree, then recreate it and enter it
    delete_dir(build_path)
    create_dir(os.path.join(build_path, "clients"))
    os.chdir(build_path)

    if args.static_lib:
        cmake_options.append(f"-DBUILD_SHARED_LIBS=OFF")

    if args.skip_ld_conf_entry:
        cmake_options.append(f"-DROCM_DISABLE_LDCONFIG=ON")

    if args.build_clients:
        cmake_build_dir = cmake_path(build_dir)
        cmake_options.append(f"-DBUILD_CLIENTS_TESTS=ON -DBUILD_CLIENTS_BENCHMARKS=ON -DBUILD_CLIENTS_SAMPLES=ON -DBUILD_DIR={cmake_build_dir}")

    if args.cmake_dargs:
        for i in args.cmake_dargs:
            cmake_options.append(f"-D{i}")

    cmake_options.append(f"{src_path}")
    cmd_opts = " ".join(cmake_options)

    return cmake_executable, cmd_opts


def make_cmd():
    """Build the native build command.

    Returns:
        (make_executable, cmd_opts): 'cmake --build' options on Windows,
        'make -jN' options on other platforms.
    """
    global args
    global OS_info

    make_options = []

    nproc = OS_info["NUM_PROC"]
    if os.name == "nt":
        make_executable = f"cmake.exe --build . "  # ninja
        if args.verbose:
            make_options.append("--verbose")
        make_options.append("--target all")
        if args.install:
            make_options.append("--target package --target install")
    else:
        make_executable = f"make -j{nproc}"
        if args.verbose:
            make_options.append("VERBOSE=1")
        # NOTE(review): the original `if True:  # args.install:` always adds
        # the install target on non-Windows hosts, ignoring the -i flag.
        # Behavior preserved here; confirm whether -i should be honored.
        if True:  # args.install:
            make_options.append("install")
    cmd_opts = " ".join(make_options)

    return make_executable, cmd_opts


def run_cmd(exe, opts):
    """Run `exe opts` through the shell; raises CalledProcessError on failure.

    Returns the process return code (always 0 when it returns, because
    check=True raises otherwise).
    """
    program = f"{exe} {opts}"
    print(program)
    proc = subprocess.run(program, check=True, stderr=subprocess.STDOUT, shell=True)
    return proc.returncode


def main():
    """Detect the host, parse arguments, then configure and build."""
    global args
    os_detect()
    args = parse_args()

    # configure
    exe, opts = config_cmd()
    run_cmd(exe, opts)

    # make
    exe, opts = make_cmd()
    run_cmd(exe, opts)


if __name__ == '__main__':
    main()
import scrape
import mongo
import datetime
import argparse


def get_args():
    """Parse command-line options for a scraper run.

    Returns:
        argparse.Namespace carrying the mongodb connection settings
        (dbhost, port, dbname, dbuser, dbpass), the target collection
        name and the site to scrape.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dbhost', help='hostname to connect a mongodb')
    parser.add_argument('--port', help='port number to connect a mongodb', type=int)
    parser.add_argument('--collection', help='collection in the db')
    parser.add_argument('--dbname', help='database name')
    parser.add_argument('--dbuser', help='username to connect a mongodb')
    parser.add_argument('--dbpass', help='password to connect a mongodb')
    parser.add_argument('--site', help='site to scrape')
    return parser.parse_args()


if __name__ == "__main__":
    args = get_args()
    db = mongo.connect_mongodb(args.dbhost, args.port, args.dbname,
                               args.dbuser, args.dbpass)
    collection = db[args.collection]
    # BUGFIX: the original tested `args.site != ''`, which is also true
    # when --site is omitted (argparse default is None) and would pass
    # None on to the scraper.  Truthiness covers both None and ''.
    if args.site:
        articles = scrape.scrape_reddit(args.site)
        for article in articles:
            # check if the post is already in the db by using url
            # add the post only if the entry does not exist in the db
            if mongo.check_post(collection, article['url']):
                # add date
                article['posted'] = datetime.datetime.now()
                mongo.insert_document(collection, article)
            else:
                # BUGFIX: the original reused double quotes for the
                # subscript inside a double-quoted f-string
                # (article["title"]) -- a SyntaxError before Python 3.12.
                print(f"{article['title']} is already in db")
import scrape
import mongo
import datetime
import argparse


def get_args():
    """Collect scraper options from the command line."""
    parser = argparse.ArgumentParser()
    option_specs = [
        ('--dbhost', {'help': 'hostname to connect a mongodb'}),
        ('--port', {'help': 'port number to connect a mongodb', 'type': int}),
        ('--collection', {'help': 'collection in the db'}),
        ('--dbname', {'help': 'database name'}),
        ('--dbuser', {'help': 'username to connect a mongodb'}),
        ('--dbpass', {'help': 'password to connect a mongodb'}),
        ('--site', {'help': 'site to scrape'}),
    ]
    for flag, extra in option_specs:
        parser.add_argument(flag, **extra)
    return parser.parse_args()


if __name__ == "__main__":
    args = get_args()
    db = mongo.connect_mongodb(args.dbhost, args.port, args.dbname,
                               args.dbuser, args.dbpass)
    collection = db[args.collection]
    if args.site != '':
        for article in scrape.scrape_reddit(args.site):
            # Deduplicate on the article URL: insert the post only when
            # no entry with this URL exists in the collection yet.
            if mongo.check_post(collection, article['url']):
                # Stamp the insertion time before writing the document.
                article['posted'] = datetime.datetime.now()
                mongo.insert_document(collection, article)
            else:
                print(f"{article['title']} is already in db")
# Copyright (c) 2017-2019, Stefan Grönke # Copyright (c) 2014-2018, iocage # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted providing that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING # IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
"""iocage Jail module.""" import typing import os import random import shlex import shutil import libzfs import freebsd_sysctl import libioc.Types import libioc.errors import libioc.events import libioc.helpers import libioc.helpers_object import libioc.JailState import libioc.DevfsRules import libioc.Host import libioc.Config.Jail.JailConfig import libioc.Network import libioc.Release import libioc.Storage import libioc.Storage.NullFSBasejail import libioc.Storage.Standalone import libioc.Storage.ZFSBasejail import libioc.ZFSShareStorage import libioc.LaunchableResource import libioc.VersionedResource import libioc.Config.Jail.Properties.ResourceLimit import libioc.ResourceSelector import libioc.Config.Jail.File.Fstab class JailResource( libioc.LaunchableResource.LaunchableResource, libioc.VersionedResource.VersionedResource ): """Resource that represents a jail.""" _jail: 'JailGenerator' _fstab: 'libioc.Config.Jail.File.Fstab.Fstab' host: 'libioc.Host.HostGenerator' root_datasets_name: typing.Optional[str] def __init__( self, jail: 'JailGenerator', dataset: typing.Optional[libzfs.ZFSDataset]=None, dataset_name: typing.Optional[str]=None, config_type: str="auto", config_file: typing.Optional[str]=None, logger: typing.Optional['libioc.Logger.Logger']=None, zfs: typing.Optional[libioc.ZFS.ZFS]=None, host: typing.Optional['libioc.Host.HostGenerator']=None, fstab: typing.Optional['libioc.Config.Jail.File.Fstab.Fstab']=None, root_datasets_name: typing.Optional[str]=None, ) -> None: self.host = libioc.helpers_object.init_host(self, host) self.root_datasets_name = root_datasets_name if fstab is not None: self._fstab = fstab if jail is not None: self._jail = jail libioc.LaunchableResource.LaunchableResource.__init__( self, dataset=dataset, dataset_name=dataset_name, config_type=config_type, config_file=config_file, logger=logger, zfs=zfs ) @property def jail(self) -> 'JailGenerator': """ Jail instance that belongs to the resource. 
Usually the resource becomes inherited from the jail itself. It can still be used linked to a foreign jail by passing jail as named attribute to the __init__ function """ try: return self._jail except AttributeError: pass # is instance of Jail itself if isinstance(self, JailGenerator): jail = self # type: JailGenerator return jail raise Exception("This resource is not a jail or not linked to one") @property def fstab(self) -> 'libioc.Config.Jail.File.Fstab.Fstab': """ Memoized fstab wrapper of a Jail. The fstab file is stored in the top level of a Jails dataset """ try: return self._fstab except AttributeError: pass try: release = self.release except AttributeError: release = None jail = self.jail fstab = libioc.Config.Jail.File.Fstab.Fstab( jail=jail, release=release, logger=self.logger, host=jail.host ) self._fstab = fstab return fstab @property def dataset_name(self) -> str: """ Name of the jail base ZFS dataset. If the resource has no dataset or dataset_name assigned yet, the jail id is used to find name the dataset """ try: return str(self._assigned_dataset_name) except AttributeError: pass try: return str(self._dataset.name) except AttributeError: pass return self._dataset_name_from_jail_name @dataset_name.setter def dataset_name(self, value: str) -> None: """ Override a jail's dataset name. This will cause Jail.dataset to point to this specific dataset instead of an auto-detected one to enable referencing jails from datasets that are not managed by iocage """ self._dataset_name = value def autoset_dataset_name(self) -> None: """ Automatically determine and set the dataset_name. When a jail was created with the new attribute enabled, the dataset might not exist, so that a dataset_name lookup would fail. Calling this method sets the jails dataset_name to a child dataset of the hosts jails dataset with the jails name. 
""" if self.root_datasets_name is None: base_name = self.host.datasets.main.jails.name else: base_name = self.host.datasets.__getitem__( self.root_datasets_name ).jails.name self.dataset_name = f"{base_name}/{self.name}" @property def _dataset_name_from_jail_name(self) -> str: jail_id = str(self.jail.config["id"]) if jail_id is None: raise libioc.errors.JailUnknownIdentifier() if self.root_datasets_name is None: base_name = self.host.datasets.main.jails.name else: try: base_name = self.host.datasets.__getitem__( self.root_datasets_name ).jails.name except KeyError: raise libioc.errors.SourceNotFound(logger=self.logger) return f"{base_name}/{jail_id}" @property def source(self) -> str: """Return the name of the jails source root datasets.""" return str( self.host.datasets.find_root_datasets_name(self.dataset_name) ) def get(self, key: str) -> typing.Any: """Get a config value from the jail or defer to its resource.""" try: return libioc.Resource.Resource.get(self, key) except AttributeError: pass return self.jail.config[key] class JailGenerator(JailResource): """ iocage unit orchestrates a jail's configuration and manages state. Jails are represented as a zfs dataset ``zpool/iocage/jails/<NAME>`` Directory Structure: zpool/iocage/jails/<NAME>: The jail's dataset containing it's configuration and root dataset. iocage-legacy used to store a jails configuration as ZFS properties on this dataset. Even though the modern JSON config mechanism is preferred. zpool/iocage/jails/<NAME>/root: This directory is the dataset used as jail's root when starting a jail. Usually the clone source of a root dataset is a snapshot of the release's root dataset. zpool/iocage/jails/<NAME>/config.json: Jails configured with the latest configuration style store their information in a JSON file. When this file is found in the jail's dataset, libiocage assumes the jail to be a JSON-style jail and ignores other configuration mechanisms. 
zpool/iocage/jails/<NAME>/config: Another compatible configuration mechanism is a UCL file. It's content is only taken into account if no JSON or ZFS configuration was found. Jail Types: Standalone: The /root dataset gets cloned from a release at creation time. It it not affected by changes to the Release and persists all data within the jail. NullFS Basejail: The fastest method to spawn a basejail by mounting read-only directories from the release's root dataset by creating a snapshot of the release on each boot of the jail. When a release is updated, the jail is updated as well on the next reboot. This type is the one used by the Python implementation of libioc. ZFS Basejail: Legacy basejails used to clone individual datasets from a release (stored in ``zpool/iocage/base/<RELEASE>``). """ _class_storage = libioc.Storage.Storage _state: typing.Optional['libioc.JailState.JailState'] _relative_hook_script_dir: str _provisioner: 'libioc.Provisioning.Prototype' def __init__( self, data: typing.Union[str, typing.Dict[str, typing.Any]]={}, dataset: typing.Optional[libzfs.ZFSDataset]=None, dataset_name: typing.Optional[str]=None, config_type: str="auto", config_file: typing.Optional[str]=None, logger: typing.Optional['libioc.Logger.Logger']=None, zfs: typing.Optional['libioc.ZFS.ZFS']=None, host: typing.Optional['libioc.Host.Host']=None, fstab: typing.Optional['libioc.Config.Jail.File.Fstab.Fstab']=None, root_datasets_name: typing.Optional[str]=None, new: bool=False ) -> None: """ Initialize a Jail. Args: data (string|dict): Jail configuration dict or jail name as string identifier. 
zfs (libzfs.ZFS): (optional) Inherit an existing libzfs.ZFS() instance from ancestor classes host (libioc.Host): (optional) Inherit an existing Host instance from ancestor classes logger (libioc.Logger): (optional) Inherit an existing Logger instance from ancestor classes """ self.logger = libioc.helpers_object.init_logger(self, logger) self.zfs = libioc.helpers_object.init_zfs(self, zfs) self.host = libioc.helpers_object.init_host(self, host) self._relative_hook_script_dir = "/.iocage" if isinstance(data, str): data = dict(id=data) if "id" in data.keys(): data["id"] = self._resolve_name(data["id"]) JailResource.__init__( self, jail=self, dataset=dataset, dataset_name=dataset_name, config_type=config_type, config_file=config_file, logger=self.logger, zfs=self.zfs, host=self.host, fstab=fstab, root_datasets_name=root_datasets_name ) if not new and (("id" not in data) or (data["id"] is None)): try: # try to get the Jail name from it's dataset_name data["id"] = self.dataset_name.split("/").pop() except libioc.errors.JailUnknownIdentifier: pass self.config = libioc.Config.Jail.JailConfig.JailConfig( host=self.host, jail=self, logger=self.logger ) self.config.clone(data) self.storage = self._class_storage( safe_mode=False, jail=self, logger=self.logger, zfs=self.zfs ) if new is False: self.config.read(data=self.read_config(), skip_on_error=True) if self.config["id"] is None: self.config["id"] = self.dataset_name.split("/").pop() @property def state(self) -> 'libioc.JailState.JailState': """ Memoized JailState. This object holds information about the jail state. The information is memoized on first access because the lookup is expensive. Please keep in mind to update the object when executing operations that potentially change a jails state. 
""" if "_state" not in object.__dir__(self): return self._init_state() elif object.__getattribute__(self, "_state") is None: return self._init_state() return object.__getattribute__(self, "_state") @state.setter def state(self, value: 'libioc.JailState.JailState') -> None: """ Return the jails JailState object. A public interface to set a jails state. This behavior is part of a performance optimization when dealing with large numbers of jails. """ object.__setattr__(self, '_state', value) @property def provisioner(self) -> 'libioc.Provisioning.prototype.Provisioner': """ Return the jails Provisioner instance. The provisioner itself is going to interpret the jails configuration dynamically, so that the Provisioner instance can be memoized. """ try: return self._provisioner except AttributeError: pass import libioc.Provisioning self._provisioner = libioc.Provisioning.Provisioner(jail=self) return self._provisioner def _init_state(self) -> 'libioc.JailState.JailState': state = libioc.JailState.JailState( self.identifier, logger=self.logger ) self.state = state state.query() return state def start( self, quick: bool=False, passthru: bool=False, single_command: typing.Optional[str]=None, event_scope: typing.Optional['libioc.events.Scope']=None, dependant_jails_seen: typing.List['JailGenerator']=[], start_dependant_jails: bool=True ) -> typing.Generator['libioc.events.IocEvent', None, None]: """ Start the jail. Args: quick (bool): Skip several operations that are not required when a jail was unchanged since its last start (for example when restarting it). passthru (bool): Execute commands in an interactive shell. single_command (str): When set the jail is launched non-persistent. The startup cycle reduces to the `prestart`, `command` and `poststop` hooks with the singe_command being executed in a /bin/sh context. event_scope (libioc.lib.events.Scope): (default=None) Provide an existing libiocage event scope or automatically create a new one instead. 
dependant_jails_seen (list[libioc.JailGenerator]): Jail depends can have circular dependencies. By passing a list of already started jails to the start command, iocage does not need to query their state, because they are known to be running already. This argument is internally used when starting a jails dependants recursively. start_dependant_jails (bool): When disabled, no dependant jails will be started. """ self.require_jail_existing() self.require_jail_stopped() self.require_jail_match_hostid() try: yield from self.config["resolver"].apply( jail=self, event_scope=event_scope ) except Exception as e: raise e events: typing.Any = libioc.events jailLaunchEvent = events.JailLaunch(jail=self, scope=event_scope) dependant_jails_started: typing.List[JailGenerator] = [] if start_dependant_jails is True: dependant_jails_seen.append(self) DependantsStartEvent = libioc.events.JailDependantsStart for event in self._start_dependant_jails( self.config["depends"], event_scope=event_scope, dependant_jails_seen=dependant_jails_seen ): if isinstance(event, DependantsStartEvent) is True: if event.done and (event.error is None): dependant_jails_started.extend(event.started_jails) yield event self._ensure_script_dir() jail_start_script_dir = "".join([ self.root_dataset.mountpoint, self._relative_hook_script_dir ]) if os.path.isdir(jail_start_script_dir) is False: os.makedirs(jail_start_script_dir, 0o755) exec_prestart: typing.List[str] = self._get_resource_limits_commands() exec_start: typing.List[str] = [ f". 
{self._relative_hook_script_dir}/.env" ] exec_created: typing.List[str] = [ f"echo \"export IOC_JID=$IOC_JID\" > {self.script_env_path}", "set -eu", ] exec_poststart: typing.List[str] = [] if self.config["vnet"]: _created, _start = self._start_vimage_network() exec_created += _created exec_start += _start exec_start += self._configure_localhost_commands() exec_start += self._configure_routes_commands() if self.host.ipfw_enabled is True: exec_start.append("service ipfw onestop") if self.config["jail_zfs"] is True: share_storage = self._zfs_share_storage share_storage.mount_zfs_shares() exec_start += share_storage.read_commands("jail") exec_created += share_storage.read_commands() if self.config["exec_prestart"] is not None: exec_prestart += [self.config["exec_prestart"]] if self.config["exec_created"] is not None: exec_created += [self.config["exec_created"]] if self.config["exec_start"] is not None and (single_command is None): exec_start += [self.config["exec_start"]] if self.config["exec_poststart"] is not None: exec_poststart += [self.config["exec_poststart"]] self._write_hook_script( "prestart", self._wrap_hook_script_command_string( exec_prestart, ignore_errors=False ) ) self._write_hook_script( "created", self._wrap_hook_script_command_string( exec_created, ) ) self._write_hook_script( "start", self._wrap_hook_script_command_string( exec_start, jailed=True, ignore_errors=False ) ) self._write_hook_script( "poststart", self._wrap_hook_script_command_string([ "set -eu", "/bin/echo running exec.created hook on the host", f"/bin/sh {self.get_hook_script_path("created")} 2>&1", "/bin/echo running exec.start hook in the jail", ( f"/usr/sbin/jexec {self.identifier} " f"{self._relative_hook_script_dir}/start.sh" ), "/bin/echo running exec.poststart hook on the host", ] + exec_poststart) ) yield jailLaunchEvent.begin() def _stop_failed_jail( ) -> typing.Generator['libioc.events.IocEvent', None, None]: jails_to_stop = [self] if start_dependant_jails is True: 
jails_to_stop.extend(list(reversed(dependant_jails_started))) for jail_to_stop in jails_to_stop: yield from jail_to_stop.stop( force=True, event_scope=jailLaunchEvent.scope ) jailLaunchEvent.add_rollback_step(_stop_failed_jail) if self.is_basejail is True: self.storage_backend.apply(self.storage, self.release) if quick is False: unknown_config_parameters = list( self.config.unknown_config_parameters ) if len(unknown_config_parameters) > 0: _unused_parameters = str(", ".join(unknown_config_parameters)) self.logger.warn( f"Unused JailConfig parameters: {_unused_parameters}" ) self._save_autoconfig() try: self._prepare_stop() if single_command is None: stdout, stderr, returncode = self._launch_persistent_jail( passthru=passthru ) else: stdout, stderr, returncode = self._launch_single_command_jail( single_command, passthru=passthru ) if returncode != 0: raise libioc.errors.JailLaunchFailed( jail=self, logger=self.logger ) except libioc.errors.IocException as e: yield from jailLaunchEvent.fail_generator(e) raise e yield jailLaunchEvent.end(stdout=stdout) @property def _zfs_share_storage( self ) -> libioc.ZFSShareStorage.QueuingZFSShareStorage: return libioc.ZFSShareStorage.QueuingZFSShareStorage( jail=self, logger=self.logger ) def _start_dependant_jails( self, terms: libioc.Filter.Terms, dependant_jails_seen: typing.List['JailGenerator'], event_scope: typing.Optional['libioc.events.Scope']=None ) -> typing.Generator['libioc.events.IocEvent', None, None]: jailDependantsStartEvent = libioc.events.JailDependantsStart( jail=self, scope=event_scope ) started_jails: typing.List[JailGenerator] = [] yield jailDependantsStartEvent.begin() _depends = self.config["depends"] if len(_depends) == 0: yield jailDependantsStartEvent.skip("No dependant jails") return dependant_jails = sorted( libioc.Jails.JailsGenerator( filters=_depends, host=self.host, logger=self.logger, zfs=self.zfs ), key=lambda x: x.config["priority"] ) for dependant_jail in dependant_jails: if dependant_jail == 
self: self.logger.warn(f"The jail {self.name} depends on itself") continue if dependant_jail in dependant_jails_seen: self.logger.spam( f"Circular dependency {dependant_jail.name} - skipping" ) continue dependant_jails_seen.append(dependant_jail) jailDependantStartEvent = libioc.events.JailDependantStart( jail=dependant_jail, scope=jailDependantsStartEvent.scope ) yield jailDependantStartEvent.begin() dependant_jail.state.query() if dependant_jail.running is True: yield jailDependantStartEvent.skip("already running") continue try: yield from dependant_jail.start( event_scope=jailDependantStartEvent.scope, dependant_jails_seen=dependant_jails_seen ) except libioc.errors.IocException as err: yield jailDependantStartEvent.fail(err) yield from jailDependantsStartEvent.fail_generator(err) raise err yield jailDependantStartEvent.end() started_jails.append(dependant_jail) # revert start of previously started dependants after failure def _revert_start( jail: JailGenerator ) -> typing.Callable[ [], typing.Generator['libioc.events.IocEvent', None, None] ]: def revert_method() -> typing.Generator[ 'libioc.events.IocEvent', None, None ]: yield from jail.stop(force=True) return revert_method jailDependantsStartEvent.add_rollback_step( _revert_start(dependant_jail) ) yield jailDependantsStartEvent.end( started_jails=started_jails ) def _run_poststop_hook_manually(self) -> None: self.logger.debug("Running poststop hook manually") libioc.helpers.exec(self.get_hook_script_path("poststop")) def _wrap_jail_command( self, commands: typing.Optional[typing.List[str]] ) -> typing.List[str]: """Wrap a jail hook command for a host hook script.""" if commands is None: return [] EOF_IDENTIFIER = f"EOF{random.getrandbits(64)}" output: typing.List[str] = [ "set -eu", "echo 'Executing jail start scripts'", "jexec -j {self.identifier} /bin/sh <<{EOF_IDENTIFIER}" ] + commands + [ EOF_IDENTIFIER, "set +e" ] return output def _wrap_hook_script_command( self, commands: 
typing.Optional[typing.Union[str, typing.List[str]]], ignore_errors: bool=True, jailed: bool=False, # ToDo: remove unused argument write_env: bool=True ) -> typing.List[str]: if isinstance(commands, str): return [commands] elif commands is None: return [] else: return commands def _wrap_hook_script_command_string( self, commands: typing.Optional[typing.Union[str, typing.List[str]]], ignore_errors: bool=True, jailed: bool=False, write_env: bool=True ) -> str: return "\n".join(self._wrap_hook_script_command( commands=commands, ignore_errors=ignore_errors, jailed=jailed, write_env=write_env )) def fork_exec( self, command: str, passthru: bool=False, event_scope: typing.Optional['libioc.events.Scope']=None, start_dependant_jails: bool=True, dependant_jails_seen: typing.List['JailGenerator']=[], **temporary_config_override: typing.Any ) -> typing.Generator['libioc.events.IocEvent', None, None]: """ Start a jail, run a command and shut it down immediately. Args: command (string): The command to execute in the jail. passthru (bool): Execute commands in an interactive shell. event_scope (libioc.lib.events.Scope): (default=None) Provide an existing libiocage event scope or automatically create a new one instead. dependant_jails_seen (list[libioc.JailGenerator]): Jail depends can have circular dependencies. By passing a list of already started jails to the start command, iocage does not need to query their state, because they are known to be running already. This argument is internally used when starting a jails dependants recursively. start_dependant_jails (bool): When disabled, no dependant jails will be started. **temporary_config_override (dict(str, any)): Other named arguments temporary override JailConfig properties. 
For example: jail = libioc.JailGenerator("myjail") events = jail.fork_exec("ifconfig", vnet=False) print(list(events)) """ self.require_jail_existing() self.require_jail_stopped() original_config = self.config config_data = original_config.data for key, value in temporary_config_override.items(): config_data[key] = value self.config = libioc.Config.Jail.JailConfig.JailConfig( host=self.host, jail=self, logger=self.logger ) self.config.clone(original_config.data) try: fork_exec_events = JailGenerator.start( self, single_command=command, passthru=passthru, event_scope=event_scope, dependant_jails_seen=dependant_jails_seen, start_dependant_jails=start_dependant_jails ) for event in fork_exec_events: yield event finally: self.config = original_config def _run_hook(self, hook_name: str) -> typing.Optional[ libioc.helpers.CommandOutput ]: """ Execute a jail hook. Hooks are executed during the start and stop process of the jail. """ key = f"exec_{hook_name}" value = str(self.config.get(key, "/usr/bin/true")) if value == "/usr/bin/true": return None self.logger.verbose( f"Running {hook_name} hook for {self.humanreadable_name}" ) lex = shlex.shlex(value) # noqa: T484 lex.whitespace_split = True command = list(lex) if (hook_name == "start") or (hook_name == "stop"): return self.exec( command, passthru=False ) # ToDo: Deprecate and remove this method raise NotImplementedError("_run_hook only supports start/stop") def _ensure_script_dir(self) -> None: """Ensure that the launch scripts dir exists.""" realpath = os.path.realpath(self.launch_script_dir) if realpath.startswith(self.dataset.mountpoint) is False: raise libioc.errors.SecurityViolationConfigJailEscape( file=realpath ) if os.path.isdir(realpath) is False: os.makedirs(realpath, 0o755) def _prepare_stop(self) -> None: self._ensure_script_dir() exec_prestop = [] exec_stop = [] exec_poststop = self._teardown_mounts() + self._clear_resource_limits() # ToDo: self.config.get("exec_prestop", "") if self.config["exec_prestop"] 
is not None: exec_prestop.append(self.config["exec_prestop"]) if self.config["exec_stop"] is not None: exec_stop.append(self.config["exec_stop"]) exec_poststop = self._stop_network() + exec_poststop if self.config["exec_poststop"] is not None: exec_poststop.append(self.config["exec_poststop"]) if self.config["jail_zfs"] is True: share_storage = libioc.ZFSShareStorage.QueuingZFSShareStorage( jail=self, logger=self.logger ) share_storage.umount_zfs_shares() exec_stop += share_storage.read_commands("jail") exec_poststop += share_storage.read_commands() if self.running and (os.path.isfile(self.script_env_path) is False): # when a jail was started from other iocage variants self._write_temporary_script_env() exec_poststop.append(f"rm \"{shlex.quote(self.script_env_path)}\"") self._write_hook_script( "prestop", self._wrap_hook_script_command_string(exec_prestop) ) self._write_hook_script( "stop", self._wrap_hook_script_command_string( exec_stop, jailed=True, ignore_errors=True ) ) self._write_hook_script( "poststop", self._wrap_hook_script_command_string( exec_poststop, write_env=False, ignore_errors=True ) ) def stop( self, force: bool=False, event_scope: typing.Optional['libioc.events.Scope']=None, log_errors: bool=True ) -> typing.Generator['libioc.events.IocEvent', None, None]: """ Stop a jail. Args: force (bool): (default=False) Ignores failures and enforces teardown if True. event_scope (libioc.lib.events.Scope): (default=None) Provide an existing libiocage event scope or automatically create a new one instead. log_errors (bool): (default=True) When disabled errors are not passed to the logger. This is useful in scripted contexts when then stop operation was executed to enforce a defined jail state. 
""" if force is False: self.require_jail_existing(log_errors=log_errors) self.require_jail_running(log_errors=log_errors) events: typing.Any = libioc.events jailDestroyEvent = events.JailDestroy(self, scope=event_scope) self._prepare_stop() yield jailDestroyEvent.begin() try: self._write_jail_conf(force=force) self._destroy_jail(log_errors=log_errors) except Exception as e: if force is True: yield jailDestroyEvent.skip() self.logger.debug( "Manually executing prestop and poststop hooks" ) try: for hook_name in ["prestop", "poststop"]: libioc.helpers.exec( command=[self.get_hook_script_path(hook_name)] ) except Exception as e: self.logger.warn(str(e)) else: yield jailDestroyEvent.fail(e) raise e yield jailDestroyEvent.end() try: self.state.query() except Exception as e: if force is True: self.logger.warn(str(e)) else: raise e def _write_temporary_script_env(self) -> None: self.logger.debug( f"Writing the hook script .env file {self.script_env_path}" f" for JID {self.jid}" ) self._ensure_script_dir() with open(self.script_env_path, "w") as f: f.write(f"export IOC_JID={self.jid}") def _write_jail_conf(self, force: bool=False) -> None: if force is True: stop_command = "/usr/bin/true" else: stop_command = ( f"[ -f \"{self._relative_hook_script_dir}/stop.sh\" ]" " || exit 0; " f". 
{self._relative_hook_script_dir}/stop.sh" ) content = "\n".join([ self.identifier + " {", ( "exec.prestop = " f"\"/bin/sh {self.get_hook_script_path("prestop")}\";" ), ( "exec.poststop = " f"\"/bin/sh {self.get_hook_script_path("poststop")}\";" ), ( f"exec.stop = \"{stop_command}\";" ), ( f"exec.jail_user = {self._get_value("exec_jail_user")};" ), "}" ]) self.logger.debug(f"Writing jail.conf file to {self._jail_conf_file}") with open(self._jail_conf_file, "w") as f: f.write(content) @property def _jail_conf_file(self) -> str: return f"{self.launch_script_dir}/jail.conf" def restart( self, shutdown: bool=False, force: bool=False, event_scope: typing.Optional['libioc.events.Scope']=None ) -> typing.Generator['libioc.events.IocEvent', None, None]: """Restart the jail.""" failed: bool = False jailRestartEvent = libioc.events.JailRestart( jail=self, scope=event_scope ) jailShutdownEvent = libioc.events.JailShutdown( jail=self, scope=jailRestartEvent.scope ) JailSoftShutdownEvent = libioc.events.JailSoftShutdown( jail=self, scope=jailRestartEvent.scope ) jailStartEvent = libioc.events.JailStart( jail=self, scope=jailRestartEvent.scope ) yield jailRestartEvent.begin() if shutdown is False: # soft stop yield JailSoftShutdownEvent.begin() try: self._run_hook("stop") yield JailSoftShutdownEvent.end() except libioc.errors.IocException: yield JailSoftShutdownEvent.fail(exception=False) # service start yield jailStartEvent.begin() try: self._run_hook("start") yield jailStartEvent.end() except libioc.errors.IocException: yield jailStartEvent.fail(exception=False) else: # full shutdown yield jailShutdownEvent.begin() try: for event in self.stop(): yield event yield jailShutdownEvent.end() except libioc.errors.IocException: failed = True yield jailShutdownEvent.fail(exception=False) if force is False: # only continue when force is enabled yield jailRestartEvent.fail(exception=False) return # start yield jailStartEvent.begin() try: for event in self.start(): yield event yield 
jailStartEvent.end() except libioc.errors.IocException: failed = True yield jailStartEvent.fail(exception=False) # respond to failure if failed is True: yield jailRestartEvent.fail(exception=False) return yield jailRestartEvent.end() def destroy( self, force: bool=False, force_stop: bool=False, event_scope: typing.Optional['libioc.events.Scope']=None ) -> typing.Generator['libioc.events.IocEvent', None, None]: """ Destroy a Jail and it's datasets. Args: force (bool): (default=False) This flag enables whether an existing jail should be shut down before destroying the dataset. By default destroying a jail requires it to be stopped. force_stop (bool): (default=False) A jail is force stopped when either the force_stop argument was set or the force option was enabled and the jail is running. When being enabled the argument invokes a full stop before destroying the jail. """ self.state.query() if event_scope is None: event_scope = libioc.events.Scope() _stop_jail = force_stop if force is False: self.require_jail_stopped() else: _stop_jail = (self.running is True) if _stop_jail is True: try: stop_events = JailGenerator.stop( self, force=True, event_scope=event_scope, log_errors=(force_stop is False) ) for event in stop_events: yield event except libioc.lib.errors.JailDestructionFailed: pass zfsDatasetDestroyEvent = libioc.events.ZFSDatasetDestroy( dataset=self.dataset, scope=event_scope ) yield zfsDatasetDestroyEvent.begin() try: self.zfs.delete_dataset_recursive(self.dataset) except Exception as e: zfsDatasetDestroyEvent.fail(e) raise e yield zfsDatasetDestroyEvent.end() def rename( self, new_name: str, event_scope: typing.Optional['libioc.events.Scope']=None ) -> typing.Generator['libioc.events.IocEvent', None, None]: """ Change the name of a jail. Args: new_name (str): The new name of a jail. It might not be used by another Jail and must differ from the current name. 
""" self.require_jail_existing() self.require_jail_stopped() self.require_storage_backend() if libioc.helpers.validate_name(new_name) is False: raise libioc.errors.InvalidJailName( name=new_name, logger=self.logger ) current_id = self.config["id"] current_mountpoint = self.dataset.mountpoint jailRenameEvent = libioc.events.JailRename( jail=self, current_name=current_id, new_name=new_name, scope=event_scope ) self.config["id"] = new_name # validates new_name yield jailRenameEvent.begin() self.logger.debug(f"Renaming jail {current_id} to {new_name}") def revert_id_change() -> None: self.config["id"] = current_id self.logger.debug(f"Jail id reverted to {current_id}") jailRenameEvent.add_rollback_step(revert_id_change) try: events = self.storage_backend.rename( self.storage, new_name=new_name, event_scope=jailRenameEvent.scope ) for event in events: yield jailRenameEvent.child_event(event) if event.error is not None: raise event.error except BaseException as e: yield jailRenameEvent.fail(e) raise e # Update fstab to the new dataset fstab_path_events = self._update_fstab_paths( current_mountpoint, event_scope=jailRenameEvent.scope ) for event in fstab_path_events: yield event yield jailRenameEvent.end() def _update_fstab_paths( self, old_path_prefix: str, new_path_prefix: typing.Optional[str]=None, event_scope: typing.Optional['libioc.events.Scope']=None ) -> typing.Generator['libioc.events.IocEvent', None, None]: """ Update a path in the whole fstab file. When no new_path_prefix is provided, the jail's root dataset is used. 
""" if new_path_prefix is None: _new_path_prefix = self.dataset.mountpoint else: _new_path_prefix = new_path_prefix jailFstabUpdateEvent = libioc.events.JailFstabUpdate( jail=self, scope=event_scope ) yield jailFstabUpdateEvent.begin() try: self.fstab.read_file() self.fstab.replace_path( old_path_prefix, _new_path_prefix ) self.fstab.save() yield jailFstabUpdateEvent.end() except BaseException as e: yield jailFstabUpdateEvent.fail(e) raise e def create( self, resource: typing.Optional[typing.Union[ 'JailGenerator', 'libioc.Release.ReleaseGenerator', str ]]=None ) -> None: """ Create a Jail from a given Resource. Args: resource (Jail or Release): The (new) jail is created from this resource. If no resource is specified, an empty dataset will be created """ if isinstance(resource, str): resource = libioc.Release(resource) if isinstance(resource, JailGenerator): self.create_from_template(template=resource) elif isinstance(resource, libioc.Release.ReleaseGenerator): self.create_from_release(release=resource) else: self.create_from_scratch() self._ensure_script_dir() def create_from_scratch( self ) -> None: """Create a new jail without any root dataset content.""" self._create_skeleton() def create_from_release( self, release: 'libioc.Release.ReleaseGenerator' ) -> None: """ Create a Jail from a Release. Args: resource (Release): The jail is created from the provided resource. This can be either another Jail or a Release. 
""" if release.fetched is False: raise libioc.errors.ReleaseNotFetched( name=release.name, logger=self.logger ) self.config["release"] = release.full_name self._create_from_resource(release) def create_from_template( self, template: 'JailGenerator' ) -> None: """Create a Jail from a template Jail.""" template.require_jail_is_template() existing_config_keys = list(self.config.keys()) for key in template.config.keys(): if key in (["id", "name", "template"] + existing_config_keys): continue self.config[key] = template.config[key] self.config['release'] = template.release.full_name self.config['basejail'] = template.config['basejail'] self.config['basejail_type'] = template.config['basejail_type'] self._create_from_resource(template) def promote(self) -> None: """Promote all datasets of the jail.""" self.zfs.promote_dataset(self.dataset, logger=self.logger) def clone_from_jail( self, source_jail: 'JailGenerator', event_scope: typing.Optional['libioc.events.Scope']=None ) -> typing.Generator['libioc.events.IocEvent', None, None]: """Create a Jail from another Jail.""" self.autoset_dataset_name() if event_scope is None: event_scope = libioc.events.Scope() yield from source_jail.clone_to_dataset( self.dataset_name, event_scope=event_scope ) self.config.clone(source_jail.config.data, skip_on_error=True) self.save() fstab_update_generator = self._update_fstab_paths( source_jail.root_dataset.mountpoint, event_scope=event_scope ) for event in fstab_update_generator: yield event def clone_to_dataset( self, destination_dataset_name: str, delete_existing: bool=False, event_scope: typing.Optional['libioc.events.Scope']=None ) -> typing.Generator['libioc.events.IocEvent', None, None]: """Clones the jails dataset to another dataset with the given name.""" jailCloneEvent = libioc.events.JailClone( jail=self, scope=event_scope ) yield jailCloneEvent.begin() try: self.zfs.clone_dataset( source=self.dataset, target=destination_dataset_name, delete_existing=delete_existing ) except 
Exception as e: err = libioc.errors.ZFSException( *e.args, logger=self.logger ) yield jailCloneEvent.fail(err) raise err yield jailCloneEvent.end() def _create_skeleton(self) -> None: if self.config["id"] is None: self.config["id"] = str(libioc.helpers.get_random_uuid()) self.require_jail_not_existing() self.logger.verbose( f"Creating jail '{self.config["id"]}'" ) for key, value in self.config.data.items(): msg = f"{key} = {value}" self.logger.spam(msg, indent=1) self.create_resource() def _create_from_resource( self, resource: 'libioc.Resource.Resource' ) -> None: self._create_skeleton() backend = self.storage_backend if backend is not None: backend.setup(self.storage, resource) self.config["hostid"] = self.host.id self._update_fstab() self.save() @property def is_basejail(self) -> bool: """ Return True if a Jail is a basejail. If this is the case, parts of the jails dataset will be mounted from its release or upstream Jail (for example a Template) """ return self.config.get("basejail", False) is True @property def storage_backend(self) -> libioc.Storage.Storage: """ Return the jail storage abstraction class. Returns the class that represents the jails storage backend according to its configuration. 
""" if not self.is_basejail: return libioc.Storage.Standalone.StandaloneJailStorage if self.config["basejail_type"] == "nullfs": return libioc.Storage.NullFSBasejail.NullFSBasejailStorage if self.config["basejail_type"] == "zfs": return libioc.Storage.ZFSBasejail.ZFSBasejailStorage def save(self) -> None: """Permanently save a jail's configuration.""" self._write_config(self.config.data) self._save_autoconfig() def _save_autoconfig(self) -> None: """Save auto-generated files.""" self.rc_conf.save() self._update_fstab() def _update_fstab(self) -> None: if self.config["basejail_type"] == "nullfs": self.fstab.release = self.release else: self.fstab.release = None self.fstab.read_file() self.fstab.save() def exec( self, command: typing.List[str], env: typing.Dict[str, str]={}, passthru: bool=False, **kwargs: typing.Any ) -> libioc.helpers.CommandOutput: """ Execute a command in a running jail. command (list): A list of command and it's arguments Example: ["/usr/bin/whoami"] env (dict): The dictionary may contain env variables that will be forwarded to the executed jail command. passthru (bool): (default=False) When enabled the commands stdout and stderr are directory forwarded to the attached terminal. The results will not be included in the CommandOutput, so that (None, None, <returncode>) is returned. """ command = ["/usr/sbin/jexec", str(self.jid)] + command command_env = self.env for env_key, env_value in env.items(): command_env[env_key] = env_value stdout, stderr, returncode = self._exec_host_command( command, env=command_env, passthru=passthru ) return stdout, stderr, returncode def passthru( self, command: typing.List[str], env: typing.Optional[typing.Dict[str, str]]=None ) -> libioc.helpers.CommandOutput: """ Execute a command in a started jail and passthrough STDIN and STDOUT. 
command (list): A list of command and it's arguments Example: ["/bin/sh"] """ if isinstance(command, str): command = [command] return self._exec_host_command( command=[ "/usr/sbin/jexec", str(self.jid) ] + command, passthru=True, env=env ) def exec_console( self ) -> libioc.helpers.CommandOutput: """Shortcut to drop into a shell of a started jail.""" self.require_jail_running() return self.passthru( ["/usr/bin/login"] + self.config["login_flags"] ) def _destroy_jail(self, log_errors: bool=True) -> None: stdout, stderr, returncode = self._exec_host_command( [ "/usr/sbin/jail", "-v", "-r", "-f", self._jail_conf_file, self.identifier ], passthru=False, env=self.env ) if returncode > 0: raise libioc.errors.JailDestructionFailed( jail=self, logger=(self.logger if log_errors else None) ) @property def _dhcp_enabled(self) -> bool: """Return True if any ip4_addr uses DHCP.""" if self.config["ip4_addr"] is None: return False return ("dhcp" in self.config["ip4_addr"].networks) is True @property def devfs_ruleset(self) -> libioc.DevfsRules.DevfsRuleset: """ Return the number of the jail's devfs ruleset. When a new combination of the base ruleset specified in jail.config["devfs_ruleset"] and rules automatically added by iocage appears, the according rule is automatically created and added to the /etc/devfs.rules file on the host Users may reference a rule by numeric identifier or name. 
This numbers are automatically selected, so it's advisable to use names.1 """ try: configured_devfs_ruleset = self.host.devfs.find_by_number( int(self.config["devfs_ruleset"]) ) except ValueError: configured_devfs_ruleset = self.host.devfs.find_by_name( self.config["devfs_ruleset"] ) devfs_ruleset = libioc.DevfsRules.DevfsRuleset() devfs_ruleset.clone(configured_devfs_ruleset) if self._dhcp_enabled is True: devfs_ruleset.append("add path 'bpf*' unhide") if self._allow_mount_zfs == "1": devfs_ruleset.append("add path zfs unhide") if self.config["jail_zfs"] is True: unhidden_parents: typing.Set[str] = set() shared_datasets = self._zfs_share_storage.get_zfs_datasets() if len(shared_datasets) > 0: devfs_ruleset.append("add path zvol unhide") for shared_dataset in shared_datasets: current_dataset_name = "zvol" for fragment in shared_dataset.name.split("/"): current_dataset_name += f"/{fragment}" if current_dataset_name in unhidden_parents: continue unhidden_parents.add(current_dataset_name) devfs_ruleset.append( f"add path {current_dataset_name} unhide" ) devfs_ruleset.append( f"add path {current_dataset_name}/* unhide" ) if self.config["allow_vmm"] is True: devfs_ruleset.append("add path vmm unhide") devfs_ruleset.append("add path vmm/* unhide") devfs_ruleset.append("add path nmdm* unhide") # create if the final rule combination does not exist as ruleset if devfs_ruleset not in self.host.devfs: self.logger.verbose("New devfs ruleset combination") # note: name and number of devfs_ruleset are both None new_ruleset_number = self.host.devfs.new_ruleset(devfs_ruleset) self.host.devfs.save() return new_ruleset_number else: ruleset_line_position = self.host.devfs.index(devfs_ruleset) return self.host.devfs[ruleset_line_position].number @staticmethod def __get_launch_command(jail_args: typing.List[str]) -> typing.List[str]: return ["/usr/sbin/jail", "-c"] + jail_args @property def _launch_args(self) -> typing.List[str]: config = self.config vnet = (config["vnet"] is True) 
value: str jail_param_args: typing.List[str] = [] for sysctl_name, sysctl in libioc.JailParams.JailParams().items(): if sysctl.ctl_type == freebsd_sysctl.types.NODE: # skip NODE continue if sysctl_name == "security.jail.param.devfs_ruleset": value = str(self.devfs_ruleset) elif sysctl_name == "security.jail.param.path": value = self.root_dataset.mountpoint elif sysctl_name == "security.jail.param.name": value = self.identifier elif sysctl_name == "security.jail.param.allow.mount.zfs": value = str(self._allow_mount_zfs) elif sysctl_name == "security.jail.param.vnet": if vnet is False: # vnet is only used when explicitly enabled # (friendly to Kernels without VIMAGE support) continue value = "vnet" elif vnet and sysctl_name.startswith("security.jail.param.ip"): continue else: config_property_name = sysctl.iocage_name if self.config._is_known_property(config_property_name): value = config[config_property_name] else: continue sysctl.value = value jail_param_args.append(str(sysctl)) jail_args = [ f"exec.timeout={self._get_value("exec_timeout")}", f"stop.timeout={self._get_value("stop_timeout")}", f"exec.prestart=\"{self.get_hook_script_path("prestart")}\"", f"exec.prestop=\"{self.get_hook_script_path("prestop")}\"", f"exec.poststop=\"{self.get_hook_script_path("poststop")}\"", f"exec.jail_user={self._get_value("exec_jail_user")}", f"mount.fstab={self.fstab.path}", f"mount.devfs={self._get_value("mount_devfs")}", "allow.dying" ] return jail_param_args + jail_args def _launch_persistent_jail( self, passthru: bool ) -> libioc.helpers.CommandOutput: command = self.__get_launch_command(self._launch_args + [ "persist", f"exec.poststart=\"{self.get_hook_script_path("poststart")}\"" ]) stdout, stderr, returncode = self._exec_host_command( command=command, passthru=passthru, env=self.env ) if returncode > 0: self.logger.verbose( f"Jail '{self.humanreadable_name}' was not started" ) return stdout, stderr, returncode self.state.query() self.logger.verbose( f"Jail 
'{self.humanreadable_name}' started with JID {self.jid}" ) return stdout, stderr, returncode def _exec_host_command( self, command: typing.List[str], passthru: bool, env: typing.Optional[typing.Dict[str, str]]=None ) -> libioc.helpers.CommandOutput: try: if passthru is True: return libioc.helpers.exec_passthru( command, logger=self.logger, env=env ) else: exec_events = libioc.helpers.exec_generator( command, logger=self.logger, env=env ) try: while True: self.logger.spam( next(exec_events).decode("UTF-8"), indent=1 ) except StopIteration as return_statement: output: libioc.helpers.CommandOutput output = return_statement.value # noqa: T484 return output except (KeyboardInterrupt, SystemExit): raise libioc.errors.JailExecutionAborted( jail=self, logger=None ) def _launch_single_command_jail( self, jail_command: str, passthru: bool ) -> libioc.helpers.CommandOutput: command = self.__get_launch_command(self._launch_args + [ "nopersist", f"exec.poststart=\"{self.get_hook_script_path("host_command")}\"", "command=/usr/bin/true" ]) _identifier = str(shlex.quote(self.identifier)) _jls_command = f"/usr/sbin/jls -j {_identifier} jid" self._write_hook_script("host_command", "\n".join( [ f"IOC_JID=$({_jls_command} 2>&1 || echo -1)", "set -e", f"/bin/sh {self.get_hook_script_path("created")}", ( f"/usr/sbin/jexec {self.identifier} " f"{self._relative_hook_script_dir}/command.sh" " 2>&1" ), f"/bin/sh {self.get_hook_script_path("poststop")}" ] )) _ipfw_enabled = self.host.ipfw_enabled self._write_hook_script("command", "\n".join( (["set +e", "service ipfw onestop"] if _ipfw_enabled else []) + [ "set -e", f". {self._relative_hook_script_dir}/start.sh", jail_command, ] )) stdout, stderr, returncode = self._exec_host_command( command=command, passthru=passthru, env=self.env ) if returncode > 0: message = f"Jail {self.humanreadable_name} command failed." else: message = f"Jail {self.humanreadable_name} command finished." 
self.logger.verbose(message) return stdout, stderr, returncode def _get_value(self, key: str) -> str: """Return jail command consumable config value string.""" return str(libioc.helpers.to_string( self.config[key], true="1", false="0", none="" )) @property def networks(self) -> typing.List[libioc.Network.Network]: """Return the list of a jails configured networks.""" networks = [] nics = self.config["interfaces"] if nics is None: return [] for nic in nics: bridge = nics[nic] try: ipv4_addresses = self.config["ip4_addr"][nic] except (KeyError, TypeError): ipv4_addresses = [] try: ipv6_addresses = self.config["ip6_addr"][nic] except (KeyError, TypeError): ipv6_addresses = [] net = libioc.Network.Network( jail=self, nic=nic, ipv4_addresses=ipv4_addresses, ipv6_addresses=ipv6_addresses, bridge=bridge, logger=self.logger ) networks.append(net) return networks def _write_hook_script(self, hook_name: str, command_string: str) -> None: file = self.get_hook_script_path(hook_name) existed = os.path.isfile(file) if hook_name in ["created", "poststart", "prestop"]: _identifier = str(shlex.quote(self.identifier)) _jls_command = f"/usr/sbin/jls -j {_identifier} jid" command_string = ( "IOC_JID=" f"$({_jls_command} 2>&1 || echo -1)" "\n" + command_string ) if hook_name == "poststop": command_string = ( "[ -f \"$(dirname $0)/.env\" ] && " ". 
\"$(dirname $0)/.env\"" "\n" ) + command_string with open(file, "w") as f: f.write("\n".join([ "#!/bin/sh", command_string ])) if existed is False: shutil.chown(file, "root", "wheel") os.chmod(file, 0o755) # nosec: executable script @property def launch_script_dir(self) -> str: """Return the launch-scripts directory path of the jail.""" return f"{self.jail.dataset.mountpoint}/launch-scripts" @property def script_env_path(self) -> str: """Return the absolute path to the jail script env file.""" return f"{self.launch_script_dir}/.env" def get_hook_script_path(self, hook_name: str) -> str: """Return the absolute path to the hook script file.""" return f"{self.jail.launch_script_dir}/{hook_name}.sh" def _start_vimage_network(self) -> typing.Tuple[ 'libioc.Network.CreatedCommandList', 'libioc.Network.JailCommandList' ]: self.logger.debug("Starting VNET/VIMAGE") created: typing.List[str] = [] start: typing.List[str] = [] for network in self.networks: _created, _start = network.setup() created += _created start += _start return created, start def _stop_network(self) -> typing.List[str]: if self.config["vnet"]: return self._stop_vimage_network() else: return self._stop_non_vimage_network() def _stop_non_vimage_network(self) -> typing.List[str]: commands: typing.List[str] = [] for protocol in (4, 6,): config_value = self.config[f"ip{protocol}_addr"] if config_value is None: return commands for nic, addresses in config_value.items(): if addresses is None: continue for address in addresses: if isinstance(address, str): # skip DHCP and ACCEPT_RTADV continue inet = "inet" if (protocol == 4) else "inet6" commands.append( f"/sbin/ifconfig {nic} {inet} {address} remove" ) return commands def _stop_vimage_network(self) -> typing.List[str]: commands: typing.List[str] = [] for network in self.networks: commands += network.teardown() return commands def _configure_localhost_commands(self) -> typing.List[str]: return ["/sbin/ifconfig lo0 localhost"] def 
_get_resource_limits_commands(self) -> typing.List[str]:
        """Return rctl commands that apply the configured resource limits."""
        commands: typing.List[str] = []
        if self.config['rlimits'] is False:
            self.logger.verbose("Resource limits disabled")
            return commands
        for key in libioc.Config.Jail.Properties.ResourceLimit.properties:
            try:
                rlimit_prop = self.config[key]
                if rlimit_prop.is_unset is True:
                    continue
            except (KeyError, AttributeError):
                # property not configured for this jail — nothing to apply
                continue
            commands.append(" ".join([
                "/usr/bin/rctl",
                "-a",
                f"jail:{self.identifier}:{key}:{rlimit_prop.limit_string}"
            ]))
        return commands

    def _clear_resource_limits(self) -> typing.List[str]:
        """Return the command that removes this jail's rctl rules."""
        if self.config['rlimits'] is False:
            return []
        self.logger.verbose("Clearing resource limits")
        # `|| true` keeps teardown going when no rules exist
        return [f"/usr/bin/rctl -r jail:{self.identifier} 2>/dev/null || true"]

    @property
    def _allow_mount(self) -> int:
        # allow_mount is implied whenever ZFS mounts are allowed
        if self._allow_mount_zfs == 1:
            return 1
        return int(self._get_value("allow_mount"))

    @property
    def _allow_mount_zfs(self) -> int:
        # jail_zfs implies allow_mount_zfs
        if self.config["jail_zfs"] is True:
            return 1
        return int(self._get_value("allow_mount_zfs"))

    def _configure_routes_commands(self) -> typing.List[str]:
        """Return commands that apply the jail's static default routes."""
        defaultrouter = self.config["defaultrouter"]
        defaultrouter6 = self.config["defaultrouter6"]

        commands: typing.List[str] = []

        if defaultrouter is not None:
            commands += list(defaultrouter.apply(jail=self))

        if defaultrouter6 is not None:
            commands += list(defaultrouter6.apply(jail=self))

        if len(commands) == 0:
            self.logger.spam("no static routes configured")

        return commands

    def require_jail_is_template(self, log_errors: bool=True) -> None:
        """Raise JailNotTemplate exception if the jail is not a template."""
        if self.config['template'] is False:
            raise libioc.errors.JailNotTemplate(
                jail=self,
                logger=(self.logger if log_errors else None)
            )

    def require_jail_match_hostid(self, log_errors: bool=True) -> None:
        """Raise JailHostIdMismatch if the jail's hostid check fails."""
        if self.hostid_check_ok is False:
            raise libioc.errors.JailHostIdMismatch(
                jail=self,
                host_hostid=self.host.id,
                logger=(self.logger if log_errors else None)
            )

    @property
    def
hostid_check_ok(self) -> bool: """Return true if the hostid check passes.""" if self.config["hostid_strict_check"] is False: self.logger.spam("hostid_strict_check is disabled") return True jail_hostid = self.config["hostid"] if (jail_hostid is None) or (jail_hostid == self.host.id): return True return False def require_storage_backend(self, log_errors: bool=True) -> None: """Raise if the jail was not initialized with a storage backend.""" if self.storage_backend is None: raise Exception("The jail has no storage backend.") def require_jail_not_template(self, log_errors: bool=True) -> None: """Raise JailIsTemplate exception if the jail is a template.""" if self.config['template'] is True: raise libioc.errors.JailIsTemplate( jail=self, logger=(self.logger if log_errors else None) ) def require_jail_not_existing(self, log_errors: bool=True) -> None: """Raise JailAlreadyExists exception if the jail already exists.""" if self.exists: raise libioc.errors.JailAlreadyExists( jail=self, logger=(self.logger if log_errors else None) ) def require_jail_existing(self, log_errors: bool=True) -> None: """Raise JailDoesNotExist exception if the jail does not exist.""" if not self.exists: raise libioc.errors.JailDoesNotExist( jail=self, logger=(self.logger if log_errors else None) ) def require_jail_stopped(self, log_errors: bool=True) -> None: """Raise JailAlreadyRunning exception if the jail is running.""" if self.running is not False: raise libioc.errors.JailAlreadyRunning( jail=self, logger=(self.logger if log_errors else None) ) def require_jail_running(self, log_errors: bool=True) -> None: """Raise JailNotRunning exception if the jail is stopped.""" if not self.running: raise libioc.errors.JailNotRunning( jail=self, logger=(self.logger if log_errors else None) ) def _teardown_mounts(self) -> typing.List[str]: commands: typing.List[str] = [] fstab_destinations = [line["destination"] for line in self.fstab] system_mountpoints = list(filter( os.path.isdir, map( 
self._get_absolute_path_from_jail_asset,
            [
                "/dev/fd",
                "/dev",
                "/proc",
                "/root/compat/linux/proc",
                "/root/etcupdate",
                "/root/usr/ports",
                "/root/usr/src",
                "/tmp"  # nosec: B108
            ]
        )
        ))

        mountpoints = fstab_destinations + system_mountpoints

        # force-unmount every known mountpoint; errors are ignored so a
        # partially torn down jail can still be stopped
        commands.append(" ".join(libioc.helpers.umount_command(
            mountpoints, force=True, ignore_error=True
        )))

        commands.append(" ".join(libioc.helpers.umount_command(
            ["-a", "-F", self.fstab.path],
            force=True,
            ignore_error=True
        )))

        if self.config.legacy is True:
            # legacy jails: find remaining nullfs mounts below the jail root
            # by parsing `mount` output and unmount them
            commands.append(" | ".join([
                "mount -t nullfs",
                "sed -r 's/(.+) on (.+) \\(nullfs, .+\\)$/\\2/'",
                f"grep '^{self.root_dataset.mountpoint}/'",
                "xargs umount"
            ]))

        return commands

    def _get_absolute_path_from_jail_asset(
        self,
        value: str
    ) -> libioc.Types.AbsolutePath:
        """Map a jail-internal path to its absolute path on the host."""
        return libioc.Types.AbsolutePath(f"{self.root_path}{value}")

    def _resolve_name(self, text: str) -> str:
        """
        Resolve a user-supplied jail name to its dataset name.

        Searches all configured root datasets for a child jail dataset whose
        name (or human-readable name) matches; raises JailNotSupplied for
        empty input and JailNotFound when nothing matches.
        """
        if (text is None) or (len(text) == 0):
            raise libioc.errors.JailNotSupplied(logger=self.logger)

        resource_selector = libioc.ResourceSelector.ResourceSelector(
            name=text,
            logger=self.logger
        )

        root_datasets = resource_selector.filter_datasets(self.host.datasets)

        for datasets_key, datasets in root_datasets.items():
            for dataset in list(datasets.jails.children):
                dataset_name = str(
                    dataset.name[(len(datasets.jails.name) + 1):]
                )
                humanreadable_name = libioc.helpers.to_humanreadable_name(
                    dataset_name
                )
                possible_names = [dataset_name, humanreadable_name]
                if resource_selector.name in possible_names:
                    return dataset_name

        raise libioc.errors.JailNotFound(text, logger=self.logger)

    @property
    def name(self) -> str:
        """Return the configured jail id."""
        return str(self.config["id"])

    @property
    def full_name(self) -> str:
        """
        Return the full identifier of a jail.

        When more than one root dataset is managed by iocage, the full source
        and name are returned. Otherwise just the name.

        For example `mydataset/jailname` or just `jailname`.
""" if len(self.host.datasets) > 1: return f"{self.source}/{self.name}" else: return self.name @property def humanreadable_name(self) -> str: """ Return the human-readable identifier to print in logs and CLI output. Whenever a Jail is found to have a UUID as identifier, a shortened string of the first 8 characters is returned """ try: return str(libioc.helpers.to_humanreadable_name(self.name)) except KeyError: raise libioc.errors.JailUnknownIdentifier( logger=self.logger ) @property def stopped(self) -> bool: """Return True if a jail is stopped.""" return self.running is not True @property def running(self) -> bool: """Return True if a jail is running.""" return self.jid is not None @property def jid(self) -> typing.Optional[int]: """Return a jails JID if it is running or None.""" if "_state" not in object.__dir__(self): # force state init when jid was requested self._init_state() try: return int(self.state["jid"]) except (KeyError, TypeError): return None @property def env(self) -> typing.Dict[str, str]: """Return the environment variables for hook scripts.""" jail_env: typing.Dict[str, str] if self.config["exec_clean"] is False: jail_env = os.environ.copy() else: jail_env = {} for prop in self.config.all_properties: prop_name = f"IOC_{prop.replace(".", "_").upper()}" jail_env[prop_name] = str(self.config[prop]) jail_env["IOC_JAIL_PATH"] = self.root_dataset.mountpoint jail_env["IOC_JID"] = str(self.jid) jail_env["PATH"] = ":".join(( "/sbin", "/bin", "/usr/sbin", "/usr/bin", "/usr/local/sbin", "/usr/local/bin", )) return jail_env @property def identifier(self) -> str: """Return the jail id used in snapshots, jls, etc.""" config = object.__getattribute__(self, 'config') return f"{self.source}-{config["id"]}" @property def release(self) -> 'libioc.Release.ReleaseGenerator': """Return the libioc.Release instance linked with the jail.""" return libioc.Release.ReleaseGenerator( name=self.config["release"], root_datasets_name=self.root_datasets_name, logger=self.logger, 
host=self.host, zfs=self.zfs ) @property def release_snapshot(self) -> libzfs.ZFSSnapshot: """Return the matching release verion snaphsot.""" snapshot: libzfs.ZFSSnapshot = self.release.current_snapshot return snapshot def __getattribute__(self, key: str) -> typing.Any: """Get an attribute from the jail, state or configuration.""" try: return object.__getattribute__(self, key) except AttributeError: pass if "_state" in object.__dir__(self): try: return object.__getattribute__(self, "state")[key] except (AttributeError, KeyError): pass raise AttributeError(f"Jail property {key} not found") def __dir__(self) -> typing.List[str]: """Get all accessible properties of a jail.""" properties = set() for prop in dict.__dir__(self): if not prop.startswith("_"): properties.add(prop) return list(properties) def __eq__(self, other: typing.Any) -> bool: """ Compare two Jails by their name. The jail is identified by its full name, including the iocage root dataset name in case there is more than one enabled on the host. 
""" if isinstance(other, JailGenerator): return False return (self.full_name == other.full_name) is True class Jail(JailGenerator): """Synchronous wrapper of JailGenerator.""" def start( # noqa: T484 self, *args, **kwargs ) -> typing.List['libioc.events.IocEvent']: """Start the jail.""" return list(JailGenerator.start(self, *args, **kwargs)) def stop( # noqa: T484 self, *args, **kwargs ) -> typing.List['libioc.events.IocEvent']: """Stop the jail.""" return list(JailGenerator.stop(self, *args, **kwargs)) def rename( # noqa: T484 self, *args, **kwargs ) -> typing.List['libioc.events.IocEvent']: """Rename the jail.""" return list(JailGenerator.rename(self, *args, **kwargs)) def _update_fstab_paths( # noqa: T484 self, *args, **kwargs ) -> typing.List['libioc.events.IocEvent']: """Update a path in the whole fstab file.""" return list(JailGenerator._update_fstab_paths(self, *args, **kwargs)) def destroy( # noqa: T484 self, force: bool=False ) -> typing.List['libioc.events.IocEvent']: """ Destroy a Jail and it's datasets. Args: force (bool): (default=False) This flag enables whether an existing jail should be shut down before destroying the dataset. By default destroying a jail requires it to be stopped. """ return list(JailGenerator.destroy(self, force=force)) def fork_exec( # noqa: T484 self, command: str, passthru: bool=False, event_scope: typing.Optional['libioc.events.Scope']=None, dependant_jails_seen: typing.List['JailGenerator']=[], start_dependant_jails: bool=True, **temporary_config_override ) -> str: """ Start a jail, run a command and shut it down immediately. Args: command (string): The command to execute in the jail. passthru (bool): Execute commands in an interactive shell. event_scope (libioc.lib.events.Scope): (default=None) Provide an existing libiocage event scope or automatically create a new one instead. dependant_jails_seen (list[libioc.JailGenerator]): Jail depends can have circular dependencies. 
By passing a list of already started jails to the start command, iocage does not need to query their state, because they are known to be running already. This argument is internally used when starting a jails dependants recursively. start_dependant_jails (bool): When disabled, no dependant jails will be started. **temporary_config_override (dict(str, any)): Other named arguments temporary override JailConfig properties. For example: jail = libioc.JailGenerator("myjail") events = jail.fork_exec("ifconfig", vnet=False) print(list(events)) """ events = JailGenerator.fork_exec( self, command=command, passthru=passthru, event_scope=event_scope, dependant_jails_seen=dependant_jails_seen, start_dependant_jails=start_dependant_jails, **temporary_config_override ) for event in events: if isinstance(event, libioc.events.JailLaunch) and event.done: return str(event.stdout)
# Copyright (c) 2017-2019, Stefan Grönke # Copyright (c) 2014-2018, iocage # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted providing that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING # IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
"""iocage Jail module.""" import typing import os import random import shlex import shutil import libzfs import freebsd_sysctl import libioc.Types import libioc.errors import libioc.events import libioc.helpers import libioc.helpers_object import libioc.JailState import libioc.DevfsRules import libioc.Host import libioc.Config.Jail.JailConfig import libioc.Network import libioc.Release import libioc.Storage import libioc.Storage.NullFSBasejail import libioc.Storage.Standalone import libioc.Storage.ZFSBasejail import libioc.ZFSShareStorage import libioc.LaunchableResource import libioc.VersionedResource import libioc.Config.Jail.Properties.ResourceLimit import libioc.ResourceSelector import libioc.Config.Jail.File.Fstab class JailResource( libioc.LaunchableResource.LaunchableResource, libioc.VersionedResource.VersionedResource ): """Resource that represents a jail.""" _jail: 'JailGenerator' _fstab: 'libioc.Config.Jail.File.Fstab.Fstab' host: 'libioc.Host.HostGenerator' root_datasets_name: typing.Optional[str] def __init__( self, jail: 'JailGenerator', dataset: typing.Optional[libzfs.ZFSDataset]=None, dataset_name: typing.Optional[str]=None, config_type: str="auto", config_file: typing.Optional[str]=None, logger: typing.Optional['libioc.Logger.Logger']=None, zfs: typing.Optional[libioc.ZFS.ZFS]=None, host: typing.Optional['libioc.Host.HostGenerator']=None, fstab: typing.Optional['libioc.Config.Jail.File.Fstab.Fstab']=None, root_datasets_name: typing.Optional[str]=None, ) -> None: self.host = libioc.helpers_object.init_host(self, host) self.root_datasets_name = root_datasets_name if fstab is not None: self._fstab = fstab if jail is not None: self._jail = jail libioc.LaunchableResource.LaunchableResource.__init__( self, dataset=dataset, dataset_name=dataset_name, config_type=config_type, config_file=config_file, logger=logger, zfs=zfs ) @property def jail(self) -> 'JailGenerator': """ Jail instance that belongs to the resource. 
Usually the resource becomes inherited from the jail itself. It can still be used linked to a foreign jail by passing jail as named attribute to the __init__ function """ try: return self._jail except AttributeError: pass # is instance of Jail itself if isinstance(self, JailGenerator): jail = self # type: JailGenerator return jail raise Exception("This resource is not a jail or not linked to one") @property def fstab(self) -> 'libioc.Config.Jail.File.Fstab.Fstab': """ Memoized fstab wrapper of a Jail. The fstab file is stored in the top level of a Jails dataset """ try: return self._fstab except AttributeError: pass try: release = self.release except AttributeError: release = None jail = self.jail fstab = libioc.Config.Jail.File.Fstab.Fstab( jail=jail, release=release, logger=self.logger, host=jail.host ) self._fstab = fstab return fstab @property def dataset_name(self) -> str: """ Name of the jail base ZFS dataset. If the resource has no dataset or dataset_name assigned yet, the jail id is used to find name the dataset """ try: return str(self._assigned_dataset_name) except AttributeError: pass try: return str(self._dataset.name) except AttributeError: pass return self._dataset_name_from_jail_name @dataset_name.setter def dataset_name(self, value: str) -> None: """ Override a jail's dataset name. This will cause Jail.dataset to point to this specific dataset instead of an auto-detected one to enable referencing jails from datasets that are not managed by iocage """ self._dataset_name = value def autoset_dataset_name(self) -> None: """ Automatically determine and set the dataset_name. When a jail was created with the new attribute enabled, the dataset might not exist, so that a dataset_name lookup would fail. Calling this method sets the jails dataset_name to a child dataset of the hosts jails dataset with the jails name. 
""" if self.root_datasets_name is None: base_name = self.host.datasets.main.jails.name else: base_name = self.host.datasets.__getitem__( self.root_datasets_name ).jails.name self.dataset_name = f"{base_name}/{self.name}" @property def _dataset_name_from_jail_name(self) -> str: jail_id = str(self.jail.config["id"]) if jail_id is None: raise libioc.errors.JailUnknownIdentifier() if self.root_datasets_name is None: base_name = self.host.datasets.main.jails.name else: try: base_name = self.host.datasets.__getitem__( self.root_datasets_name ).jails.name except KeyError: raise libioc.errors.SourceNotFound(logger=self.logger) return f"{base_name}/{jail_id}" @property def source(self) -> str: """Return the name of the jails source root datasets.""" return str( self.host.datasets.find_root_datasets_name(self.dataset_name) ) def get(self, key: str) -> typing.Any: """Get a config value from the jail or defer to its resource.""" try: return libioc.Resource.Resource.get(self, key) except AttributeError: pass return self.jail.config[key] class JailGenerator(JailResource): """ iocage unit orchestrates a jail's configuration and manages state. Jails are represented as a zfs dataset ``zpool/iocage/jails/<NAME>`` Directory Structure: zpool/iocage/jails/<NAME>: The jail's dataset containing it's configuration and root dataset. iocage-legacy used to store a jails configuration as ZFS properties on this dataset. Even though the modern JSON config mechanism is preferred. zpool/iocage/jails/<NAME>/root: This directory is the dataset used as jail's root when starting a jail. Usually the clone source of a root dataset is a snapshot of the release's root dataset. zpool/iocage/jails/<NAME>/config.json: Jails configured with the latest configuration style store their information in a JSON file. When this file is found in the jail's dataset, libiocage assumes the jail to be a JSON-style jail and ignores other configuration mechanisms. 
zpool/iocage/jails/<NAME>/config: Another compatible configuration mechanism is a UCL file. It's content is only taken into account if no JSON or ZFS configuration was found. Jail Types: Standalone: The /root dataset gets cloned from a release at creation time. It it not affected by changes to the Release and persists all data within the jail. NullFS Basejail: The fastest method to spawn a basejail by mounting read-only directories from the release's root dataset by creating a snapshot of the release on each boot of the jail. When a release is updated, the jail is updated as well on the next reboot. This type is the one used by the Python implementation of libioc. ZFS Basejail: Legacy basejails used to clone individual datasets from a release (stored in ``zpool/iocage/base/<RELEASE>``). """ _class_storage = libioc.Storage.Storage _state: typing.Optional['libioc.JailState.JailState'] _relative_hook_script_dir: str _provisioner: 'libioc.Provisioning.Prototype' def __init__( self, data: typing.Union[str, typing.Dict[str, typing.Any]]={}, dataset: typing.Optional[libzfs.ZFSDataset]=None, dataset_name: typing.Optional[str]=None, config_type: str="auto", config_file: typing.Optional[str]=None, logger: typing.Optional['libioc.Logger.Logger']=None, zfs: typing.Optional['libioc.ZFS.ZFS']=None, host: typing.Optional['libioc.Host.Host']=None, fstab: typing.Optional['libioc.Config.Jail.File.Fstab.Fstab']=None, root_datasets_name: typing.Optional[str]=None, new: bool=False ) -> None: """ Initialize a Jail. Args: data (string|dict): Jail configuration dict or jail name as string identifier. 
zfs (libzfs.ZFS): (optional) Inherit an existing libzfs.ZFS() instance from ancestor classes host (libioc.Host): (optional) Inherit an existing Host instance from ancestor classes logger (libioc.Logger): (optional) Inherit an existing Logger instance from ancestor classes """ self.logger = libioc.helpers_object.init_logger(self, logger) self.zfs = libioc.helpers_object.init_zfs(self, zfs) self.host = libioc.helpers_object.init_host(self, host) self._relative_hook_script_dir = "/.iocage" if isinstance(data, str): data = dict(id=data) if "id" in data.keys(): data["id"] = self._resolve_name(data["id"]) JailResource.__init__( self, jail=self, dataset=dataset, dataset_name=dataset_name, config_type=config_type, config_file=config_file, logger=self.logger, zfs=self.zfs, host=self.host, fstab=fstab, root_datasets_name=root_datasets_name ) if not new and (("id" not in data) or (data["id"] is None)): try: # try to get the Jail name from it's dataset_name data["id"] = self.dataset_name.split("/").pop() except libioc.errors.JailUnknownIdentifier: pass self.config = libioc.Config.Jail.JailConfig.JailConfig( host=self.host, jail=self, logger=self.logger ) self.config.clone(data) self.storage = self._class_storage( safe_mode=False, jail=self, logger=self.logger, zfs=self.zfs ) if new is False: self.config.read(data=self.read_config(), skip_on_error=True) if self.config["id"] is None: self.config["id"] = self.dataset_name.split("/").pop() @property def state(self) -> 'libioc.JailState.JailState': """ Memoized JailState. This object holds information about the jail state. The information is memoized on first access because the lookup is expensive. Please keep in mind to update the object when executing operations that potentially change a jails state. 
""" if "_state" not in object.__dir__(self): return self._init_state() elif object.__getattribute__(self, "_state") is None: return self._init_state() return object.__getattribute__(self, "_state") @state.setter def state(self, value: 'libioc.JailState.JailState') -> None: """ Return the jails JailState object. A public interface to set a jails state. This behavior is part of a performance optimization when dealing with large numbers of jails. """ object.__setattr__(self, '_state', value) @property def provisioner(self) -> 'libioc.Provisioning.prototype.Provisioner': """ Return the jails Provisioner instance. The provisioner itself is going to interpret the jails configuration dynamically, so that the Provisioner instance can be memoized. """ try: return self._provisioner except AttributeError: pass import libioc.Provisioning self._provisioner = libioc.Provisioning.Provisioner(jail=self) return self._provisioner def _init_state(self) -> 'libioc.JailState.JailState': state = libioc.JailState.JailState( self.identifier, logger=self.logger ) self.state = state state.query() return state def start( self, quick: bool=False, passthru: bool=False, single_command: typing.Optional[str]=None, event_scope: typing.Optional['libioc.events.Scope']=None, dependant_jails_seen: typing.List['JailGenerator']=[], start_dependant_jails: bool=True ) -> typing.Generator['libioc.events.IocEvent', None, None]: """ Start the jail. Args: quick (bool): Skip several operations that are not required when a jail was unchanged since its last start (for example when restarting it). passthru (bool): Execute commands in an interactive shell. single_command (str): When set the jail is launched non-persistent. The startup cycle reduces to the `prestart`, `command` and `poststop` hooks with the singe_command being executed in a /bin/sh context. event_scope (libioc.lib.events.Scope): (default=None) Provide an existing libiocage event scope or automatically create a new one instead. 
dependant_jails_seen (list[libioc.JailGenerator]): Jail depends can have circular dependencies. By passing a list of already started jails to the start command, iocage does not need to query their state, because they are known to be running already. This argument is internally used when starting a jails dependants recursively. start_dependant_jails (bool): When disabled, no dependant jails will be started. """ self.require_jail_existing() self.require_jail_stopped() self.require_jail_match_hostid() try: yield from self.config["resolver"].apply( jail=self, event_scope=event_scope ) except Exception as e: raise e events: typing.Any = libioc.events jailLaunchEvent = events.JailLaunch(jail=self, scope=event_scope) dependant_jails_started: typing.List[JailGenerator] = [] if start_dependant_jails is True: dependant_jails_seen.append(self) DependantsStartEvent = libioc.events.JailDependantsStart for event in self._start_dependant_jails( self.config["depends"], event_scope=event_scope, dependant_jails_seen=dependant_jails_seen ): if isinstance(event, DependantsStartEvent) is True: if event.done and (event.error is None): dependant_jails_started.extend(event.started_jails) yield event self._ensure_script_dir() jail_start_script_dir = "".join([ self.root_dataset.mountpoint, self._relative_hook_script_dir ]) if os.path.isdir(jail_start_script_dir) is False: os.makedirs(jail_start_script_dir, 0o755) exec_prestart: typing.List[str] = self._get_resource_limits_commands() exec_start: typing.List[str] = [ f". 
{self._relative_hook_script_dir}/.env" ] exec_created: typing.List[str] = [ f"echo \"export IOC_JID=$IOC_JID\" > {self.script_env_path}", "set -eu", ] exec_poststart: typing.List[str] = [] if self.config["vnet"]: _created, _start = self._start_vimage_network() exec_created += _created exec_start += _start exec_start += self._configure_localhost_commands() exec_start += self._configure_routes_commands() if self.host.ipfw_enabled is True: exec_start.append("service ipfw onestop") if self.config["jail_zfs"] is True: share_storage = self._zfs_share_storage share_storage.mount_zfs_shares() exec_start += share_storage.read_commands("jail") exec_created += share_storage.read_commands() if self.config["exec_prestart"] is not None: exec_prestart += [self.config["exec_prestart"]] if self.config["exec_created"] is not None: exec_created += [self.config["exec_created"]] if self.config["exec_start"] is not None and (single_command is None): exec_start += [self.config["exec_start"]] if self.config["exec_poststart"] is not None: exec_poststart += [self.config["exec_poststart"]] self._write_hook_script( "prestart", self._wrap_hook_script_command_string( exec_prestart, ignore_errors=False ) ) self._write_hook_script( "created", self._wrap_hook_script_command_string( exec_created, ) ) self._write_hook_script( "start", self._wrap_hook_script_command_string( exec_start, jailed=True, ignore_errors=False ) ) self._write_hook_script( "poststart", self._wrap_hook_script_command_string([ "set -eu", "/bin/echo running exec.created hook on the host", f"/bin/sh {self.get_hook_script_path('created')} 2>&1", "/bin/echo running exec.start hook in the jail", ( f"/usr/sbin/jexec {self.identifier} " f"{self._relative_hook_script_dir}/start.sh" ), "/bin/echo running exec.poststart hook on the host", ] + exec_poststart) ) yield jailLaunchEvent.begin() def _stop_failed_jail( ) -> typing.Generator['libioc.events.IocEvent', None, None]: jails_to_stop = [self] if start_dependant_jails is True: 
jails_to_stop.extend(list(reversed(dependant_jails_started))) for jail_to_stop in jails_to_stop: yield from jail_to_stop.stop( force=True, event_scope=jailLaunchEvent.scope ) jailLaunchEvent.add_rollback_step(_stop_failed_jail) if self.is_basejail is True: self.storage_backend.apply(self.storage, self.release) if quick is False: unknown_config_parameters = list( self.config.unknown_config_parameters ) if len(unknown_config_parameters) > 0: _unused_parameters = str(", ".join(unknown_config_parameters)) self.logger.warn( f"Unused JailConfig parameters: {_unused_parameters}" ) self._save_autoconfig() try: self._prepare_stop() if single_command is None: stdout, stderr, returncode = self._launch_persistent_jail( passthru=passthru ) else: stdout, stderr, returncode = self._launch_single_command_jail( single_command, passthru=passthru ) if returncode != 0: raise libioc.errors.JailLaunchFailed( jail=self, logger=self.logger ) except libioc.errors.IocException as e: yield from jailLaunchEvent.fail_generator(e) raise e yield jailLaunchEvent.end(stdout=stdout) @property def _zfs_share_storage( self ) -> libioc.ZFSShareStorage.QueuingZFSShareStorage: return libioc.ZFSShareStorage.QueuingZFSShareStorage( jail=self, logger=self.logger ) def _start_dependant_jails( self, terms: libioc.Filter.Terms, dependant_jails_seen: typing.List['JailGenerator'], event_scope: typing.Optional['libioc.events.Scope']=None ) -> typing.Generator['libioc.events.IocEvent', None, None]: jailDependantsStartEvent = libioc.events.JailDependantsStart( jail=self, scope=event_scope ) started_jails: typing.List[JailGenerator] = [] yield jailDependantsStartEvent.begin() _depends = self.config["depends"] if len(_depends) == 0: yield jailDependantsStartEvent.skip("No dependant jails") return dependant_jails = sorted( libioc.Jails.JailsGenerator( filters=_depends, host=self.host, logger=self.logger, zfs=self.zfs ), key=lambda x: x.config["priority"] ) for dependant_jail in dependant_jails: if dependant_jail == 
self: self.logger.warn(f"The jail {self.name} depends on itself") continue if dependant_jail in dependant_jails_seen: self.logger.spam( f"Circular dependency {dependant_jail.name} - skipping" ) continue dependant_jails_seen.append(dependant_jail) jailDependantStartEvent = libioc.events.JailDependantStart( jail=dependant_jail, scope=jailDependantsStartEvent.scope ) yield jailDependantStartEvent.begin() dependant_jail.state.query() if dependant_jail.running is True: yield jailDependantStartEvent.skip("already running") continue try: yield from dependant_jail.start( event_scope=jailDependantStartEvent.scope, dependant_jails_seen=dependant_jails_seen ) except libioc.errors.IocException as err: yield jailDependantStartEvent.fail(err) yield from jailDependantsStartEvent.fail_generator(err) raise err yield jailDependantStartEvent.end() started_jails.append(dependant_jail) # revert start of previously started dependants after failure def _revert_start( jail: JailGenerator ) -> typing.Callable[ [], typing.Generator['libioc.events.IocEvent', None, None] ]: def revert_method() -> typing.Generator[ 'libioc.events.IocEvent', None, None ]: yield from jail.stop(force=True) return revert_method jailDependantsStartEvent.add_rollback_step( _revert_start(dependant_jail) ) yield jailDependantsStartEvent.end( started_jails=started_jails ) def _run_poststop_hook_manually(self) -> None: self.logger.debug("Running poststop hook manually") libioc.helpers.exec(self.get_hook_script_path("poststop")) def _wrap_jail_command( self, commands: typing.Optional[typing.List[str]] ) -> typing.List[str]: """Wrap a jail hook command for a host hook script.""" if commands is None: return [] EOF_IDENTIFIER = f"EOF{random.getrandbits(64)}" output: typing.List[str] = [ "set -eu", "echo 'Executing jail start scripts'", "jexec -j {self.identifier} /bin/sh <<{EOF_IDENTIFIER}" ] + commands + [ EOF_IDENTIFIER, "set +e" ] return output def _wrap_hook_script_command( self, commands: 
typing.Optional[typing.Union[str, typing.List[str]]], ignore_errors: bool=True, jailed: bool=False, # ToDo: remove unused argument write_env: bool=True ) -> typing.List[str]: if isinstance(commands, str): return [commands] elif commands is None: return [] else: return commands def _wrap_hook_script_command_string( self, commands: typing.Optional[typing.Union[str, typing.List[str]]], ignore_errors: bool=True, jailed: bool=False, write_env: bool=True ) -> str: return "\n".join(self._wrap_hook_script_command( commands=commands, ignore_errors=ignore_errors, jailed=jailed, write_env=write_env )) def fork_exec( self, command: str, passthru: bool=False, event_scope: typing.Optional['libioc.events.Scope']=None, start_dependant_jails: bool=True, dependant_jails_seen: typing.List['JailGenerator']=[], **temporary_config_override: typing.Any ) -> typing.Generator['libioc.events.IocEvent', None, None]: """ Start a jail, run a command and shut it down immediately. Args: command (string): The command to execute in the jail. passthru (bool): Execute commands in an interactive shell. event_scope (libioc.lib.events.Scope): (default=None) Provide an existing libiocage event scope or automatically create a new one instead. dependant_jails_seen (list[libioc.JailGenerator]): Jail depends can have circular dependencies. By passing a list of already started jails to the start command, iocage does not need to query their state, because they are known to be running already. This argument is internally used when starting a jails dependants recursively. start_dependant_jails (bool): When disabled, no dependant jails will be started. **temporary_config_override (dict(str, any)): Other named arguments temporary override JailConfig properties. 
For example: jail = libioc.JailGenerator("myjail") events = jail.fork_exec("ifconfig", vnet=False) print(list(events)) """ self.require_jail_existing() self.require_jail_stopped() original_config = self.config config_data = original_config.data for key, value in temporary_config_override.items(): config_data[key] = value self.config = libioc.Config.Jail.JailConfig.JailConfig( host=self.host, jail=self, logger=self.logger ) self.config.clone(original_config.data) try: fork_exec_events = JailGenerator.start( self, single_command=command, passthru=passthru, event_scope=event_scope, dependant_jails_seen=dependant_jails_seen, start_dependant_jails=start_dependant_jails ) for event in fork_exec_events: yield event finally: self.config = original_config def _run_hook(self, hook_name: str) -> typing.Optional[ libioc.helpers.CommandOutput ]: """ Execute a jail hook. Hooks are executed during the start and stop process of the jail. """ key = f"exec_{hook_name}" value = str(self.config.get(key, "/usr/bin/true")) if value == "/usr/bin/true": return None self.logger.verbose( f"Running {hook_name} hook for {self.humanreadable_name}" ) lex = shlex.shlex(value) # noqa: T484 lex.whitespace_split = True command = list(lex) if (hook_name == "start") or (hook_name == "stop"): return self.exec( command, passthru=False ) # ToDo: Deprecate and remove this method raise NotImplementedError("_run_hook only supports start/stop") def _ensure_script_dir(self) -> None: """Ensure that the launch scripts dir exists.""" realpath = os.path.realpath(self.launch_script_dir) if realpath.startswith(self.dataset.mountpoint) is False: raise libioc.errors.SecurityViolationConfigJailEscape( file=realpath ) if os.path.isdir(realpath) is False: os.makedirs(realpath, 0o755) def _prepare_stop(self) -> None: self._ensure_script_dir() exec_prestop = [] exec_stop = [] exec_poststop = self._teardown_mounts() + self._clear_resource_limits() # ToDo: self.config.get("exec_prestop", "") if self.config["exec_prestop"] 
is not None: exec_prestop.append(self.config["exec_prestop"]) if self.config["exec_stop"] is not None: exec_stop.append(self.config["exec_stop"]) exec_poststop = self._stop_network() + exec_poststop if self.config["exec_poststop"] is not None: exec_poststop.append(self.config["exec_poststop"]) if self.config["jail_zfs"] is True: share_storage = libioc.ZFSShareStorage.QueuingZFSShareStorage( jail=self, logger=self.logger ) share_storage.umount_zfs_shares() exec_stop += share_storage.read_commands("jail") exec_poststop += share_storage.read_commands() if self.running and (os.path.isfile(self.script_env_path) is False): # when a jail was started from other iocage variants self._write_temporary_script_env() exec_poststop.append(f"rm \"{shlex.quote(self.script_env_path)}\"") self._write_hook_script( "prestop", self._wrap_hook_script_command_string(exec_prestop) ) self._write_hook_script( "stop", self._wrap_hook_script_command_string( exec_stop, jailed=True, ignore_errors=True ) ) self._write_hook_script( "poststop", self._wrap_hook_script_command_string( exec_poststop, write_env=False, ignore_errors=True ) ) def stop( self, force: bool=False, event_scope: typing.Optional['libioc.events.Scope']=None, log_errors: bool=True ) -> typing.Generator['libioc.events.IocEvent', None, None]: """ Stop a jail. Args: force (bool): (default=False) Ignores failures and enforces teardown if True. event_scope (libioc.lib.events.Scope): (default=None) Provide an existing libiocage event scope or automatically create a new one instead. log_errors (bool): (default=True) When disabled errors are not passed to the logger. This is useful in scripted contexts when then stop operation was executed to enforce a defined jail state. 
""" if force is False: self.require_jail_existing(log_errors=log_errors) self.require_jail_running(log_errors=log_errors) events: typing.Any = libioc.events jailDestroyEvent = events.JailDestroy(self, scope=event_scope) self._prepare_stop() yield jailDestroyEvent.begin() try: self._write_jail_conf(force=force) self._destroy_jail(log_errors=log_errors) except Exception as e: if force is True: yield jailDestroyEvent.skip() self.logger.debug( "Manually executing prestop and poststop hooks" ) try: for hook_name in ["prestop", "poststop"]: libioc.helpers.exec( command=[self.get_hook_script_path(hook_name)] ) except Exception as e: self.logger.warn(str(e)) else: yield jailDestroyEvent.fail(e) raise e yield jailDestroyEvent.end() try: self.state.query() except Exception as e: if force is True: self.logger.warn(str(e)) else: raise e def _write_temporary_script_env(self) -> None: self.logger.debug( f"Writing the hook script .env file {self.script_env_path}" f" for JID {self.jid}" ) self._ensure_script_dir() with open(self.script_env_path, "w") as f: f.write(f"export IOC_JID={self.jid}") def _write_jail_conf(self, force: bool=False) -> None: if force is True: stop_command = "/usr/bin/true" else: stop_command = ( f"[ -f \"{self._relative_hook_script_dir}/stop.sh\" ]" " || exit 0; " f". 
{self._relative_hook_script_dir}/stop.sh" ) content = "\n".join([ self.identifier + " {", ( "exec.prestop = " f"\"/bin/sh {self.get_hook_script_path('prestop')}\";" ), ( "exec.poststop = " f"\"/bin/sh {self.get_hook_script_path('poststop')}\";" ), ( f"exec.stop = \"{stop_command}\";" ), ( f"exec.jail_user = {self._get_value('exec_jail_user')};" ), "}" ]) self.logger.debug(f"Writing jail.conf file to {self._jail_conf_file}") with open(self._jail_conf_file, "w") as f: f.write(content) @property def _jail_conf_file(self) -> str: return f"{self.launch_script_dir}/jail.conf" def restart( self, shutdown: bool=False, force: bool=False, event_scope: typing.Optional['libioc.events.Scope']=None ) -> typing.Generator['libioc.events.IocEvent', None, None]: """Restart the jail.""" failed: bool = False jailRestartEvent = libioc.events.JailRestart( jail=self, scope=event_scope ) jailShutdownEvent = libioc.events.JailShutdown( jail=self, scope=jailRestartEvent.scope ) JailSoftShutdownEvent = libioc.events.JailSoftShutdown( jail=self, scope=jailRestartEvent.scope ) jailStartEvent = libioc.events.JailStart( jail=self, scope=jailRestartEvent.scope ) yield jailRestartEvent.begin() if shutdown is False: # soft stop yield JailSoftShutdownEvent.begin() try: self._run_hook("stop") yield JailSoftShutdownEvent.end() except libioc.errors.IocException: yield JailSoftShutdownEvent.fail(exception=False) # service start yield jailStartEvent.begin() try: self._run_hook("start") yield jailStartEvent.end() except libioc.errors.IocException: yield jailStartEvent.fail(exception=False) else: # full shutdown yield jailShutdownEvent.begin() try: for event in self.stop(): yield event yield jailShutdownEvent.end() except libioc.errors.IocException: failed = True yield jailShutdownEvent.fail(exception=False) if force is False: # only continue when force is enabled yield jailRestartEvent.fail(exception=False) return # start yield jailStartEvent.begin() try: for event in self.start(): yield event yield 
jailStartEvent.end() except libioc.errors.IocException: failed = True yield jailStartEvent.fail(exception=False) # respond to failure if failed is True: yield jailRestartEvent.fail(exception=False) return yield jailRestartEvent.end() def destroy( self, force: bool=False, force_stop: bool=False, event_scope: typing.Optional['libioc.events.Scope']=None ) -> typing.Generator['libioc.events.IocEvent', None, None]: """ Destroy a Jail and it's datasets. Args: force (bool): (default=False) This flag enables whether an existing jail should be shut down before destroying the dataset. By default destroying a jail requires it to be stopped. force_stop (bool): (default=False) A jail is force stopped when either the force_stop argument was set or the force option was enabled and the jail is running. When being enabled the argument invokes a full stop before destroying the jail. """ self.state.query() if event_scope is None: event_scope = libioc.events.Scope() _stop_jail = force_stop if force is False: self.require_jail_stopped() else: _stop_jail = (self.running is True) if _stop_jail is True: try: stop_events = JailGenerator.stop( self, force=True, event_scope=event_scope, log_errors=(force_stop is False) ) for event in stop_events: yield event except libioc.lib.errors.JailDestructionFailed: pass zfsDatasetDestroyEvent = libioc.events.ZFSDatasetDestroy( dataset=self.dataset, scope=event_scope ) yield zfsDatasetDestroyEvent.begin() try: self.zfs.delete_dataset_recursive(self.dataset) except Exception as e: zfsDatasetDestroyEvent.fail(e) raise e yield zfsDatasetDestroyEvent.end() def rename( self, new_name: str, event_scope: typing.Optional['libioc.events.Scope']=None ) -> typing.Generator['libioc.events.IocEvent', None, None]: """ Change the name of a jail. Args: new_name (str): The new name of a jail. It might not be used by another Jail and must differ from the current name. 
""" self.require_jail_existing() self.require_jail_stopped() self.require_storage_backend() if libioc.helpers.validate_name(new_name) is False: raise libioc.errors.InvalidJailName( name=new_name, logger=self.logger ) current_id = self.config["id"] current_mountpoint = self.dataset.mountpoint jailRenameEvent = libioc.events.JailRename( jail=self, current_name=current_id, new_name=new_name, scope=event_scope ) self.config["id"] = new_name # validates new_name yield jailRenameEvent.begin() self.logger.debug(f"Renaming jail {current_id} to {new_name}") def revert_id_change() -> None: self.config["id"] = current_id self.logger.debug(f"Jail id reverted to {current_id}") jailRenameEvent.add_rollback_step(revert_id_change) try: events = self.storage_backend.rename( self.storage, new_name=new_name, event_scope=jailRenameEvent.scope ) for event in events: yield jailRenameEvent.child_event(event) if event.error is not None: raise event.error except BaseException as e: yield jailRenameEvent.fail(e) raise e # Update fstab to the new dataset fstab_path_events = self._update_fstab_paths( current_mountpoint, event_scope=jailRenameEvent.scope ) for event in fstab_path_events: yield event yield jailRenameEvent.end() def _update_fstab_paths( self, old_path_prefix: str, new_path_prefix: typing.Optional[str]=None, event_scope: typing.Optional['libioc.events.Scope']=None ) -> typing.Generator['libioc.events.IocEvent', None, None]: """ Update a path in the whole fstab file. When no new_path_prefix is provided, the jail's root dataset is used. 
""" if new_path_prefix is None: _new_path_prefix = self.dataset.mountpoint else: _new_path_prefix = new_path_prefix jailFstabUpdateEvent = libioc.events.JailFstabUpdate( jail=self, scope=event_scope ) yield jailFstabUpdateEvent.begin() try: self.fstab.read_file() self.fstab.replace_path( old_path_prefix, _new_path_prefix ) self.fstab.save() yield jailFstabUpdateEvent.end() except BaseException as e: yield jailFstabUpdateEvent.fail(e) raise e def create( self, resource: typing.Optional[typing.Union[ 'JailGenerator', 'libioc.Release.ReleaseGenerator', str ]]=None ) -> None: """ Create a Jail from a given Resource. Args: resource (Jail or Release): The (new) jail is created from this resource. If no resource is specified, an empty dataset will be created """ if isinstance(resource, str): resource = libioc.Release(resource) if isinstance(resource, JailGenerator): self.create_from_template(template=resource) elif isinstance(resource, libioc.Release.ReleaseGenerator): self.create_from_release(release=resource) else: self.create_from_scratch() self._ensure_script_dir() def create_from_scratch( self ) -> None: """Create a new jail without any root dataset content.""" self._create_skeleton() def create_from_release( self, release: 'libioc.Release.ReleaseGenerator' ) -> None: """ Create a Jail from a Release. Args: resource (Release): The jail is created from the provided resource. This can be either another Jail or a Release. 
""" if release.fetched is False: raise libioc.errors.ReleaseNotFetched( name=release.name, logger=self.logger ) self.config["release"] = release.full_name self._create_from_resource(release) def create_from_template( self, template: 'JailGenerator' ) -> None: """Create a Jail from a template Jail.""" template.require_jail_is_template() existing_config_keys = list(self.config.keys()) for key in template.config.keys(): if key in (["id", "name", "template"] + existing_config_keys): continue self.config[key] = template.config[key] self.config['release'] = template.release.full_name self.config['basejail'] = template.config['basejail'] self.config['basejail_type'] = template.config['basejail_type'] self._create_from_resource(template) def promote(self) -> None: """Promote all datasets of the jail.""" self.zfs.promote_dataset(self.dataset, logger=self.logger) def clone_from_jail( self, source_jail: 'JailGenerator', event_scope: typing.Optional['libioc.events.Scope']=None ) -> typing.Generator['libioc.events.IocEvent', None, None]: """Create a Jail from another Jail.""" self.autoset_dataset_name() if event_scope is None: event_scope = libioc.events.Scope() yield from source_jail.clone_to_dataset( self.dataset_name, event_scope=event_scope ) self.config.clone(source_jail.config.data, skip_on_error=True) self.save() fstab_update_generator = self._update_fstab_paths( source_jail.root_dataset.mountpoint, event_scope=event_scope ) for event in fstab_update_generator: yield event def clone_to_dataset( self, destination_dataset_name: str, delete_existing: bool=False, event_scope: typing.Optional['libioc.events.Scope']=None ) -> typing.Generator['libioc.events.IocEvent', None, None]: """Clones the jails dataset to another dataset with the given name.""" jailCloneEvent = libioc.events.JailClone( jail=self, scope=event_scope ) yield jailCloneEvent.begin() try: self.zfs.clone_dataset( source=self.dataset, target=destination_dataset_name, delete_existing=delete_existing ) except 
Exception as e: err = libioc.errors.ZFSException( *e.args, logger=self.logger ) yield jailCloneEvent.fail(err) raise err yield jailCloneEvent.end() def _create_skeleton(self) -> None: if self.config["id"] is None: self.config["id"] = str(libioc.helpers.get_random_uuid()) self.require_jail_not_existing() self.logger.verbose( f"Creating jail '{self.config['id']}'" ) for key, value in self.config.data.items(): msg = f"{key} = {value}" self.logger.spam(msg, indent=1) self.create_resource() def _create_from_resource( self, resource: 'libioc.Resource.Resource' ) -> None: self._create_skeleton() backend = self.storage_backend if backend is not None: backend.setup(self.storage, resource) self.config["hostid"] = self.host.id self._update_fstab() self.save() @property def is_basejail(self) -> bool: """ Return True if a Jail is a basejail. If this is the case, parts of the jails dataset will be mounted from its release or upstream Jail (for example a Template) """ return self.config.get("basejail", False) is True @property def storage_backend(self) -> libioc.Storage.Storage: """ Return the jail storage abstraction class. Returns the class that represents the jails storage backend according to its configuration. 
""" if not self.is_basejail: return libioc.Storage.Standalone.StandaloneJailStorage if self.config["basejail_type"] == "nullfs": return libioc.Storage.NullFSBasejail.NullFSBasejailStorage if self.config["basejail_type"] == "zfs": return libioc.Storage.ZFSBasejail.ZFSBasejailStorage def save(self) -> None: """Permanently save a jail's configuration.""" self._write_config(self.config.data) self._save_autoconfig() def _save_autoconfig(self) -> None: """Save auto-generated files.""" self.rc_conf.save() self._update_fstab() def _update_fstab(self) -> None: if self.config["basejail_type"] == "nullfs": self.fstab.release = self.release else: self.fstab.release = None self.fstab.read_file() self.fstab.save() def exec( self, command: typing.List[str], env: typing.Dict[str, str]={}, passthru: bool=False, **kwargs: typing.Any ) -> libioc.helpers.CommandOutput: """ Execute a command in a running jail. command (list): A list of command and it's arguments Example: ["/usr/bin/whoami"] env (dict): The dictionary may contain env variables that will be forwarded to the executed jail command. passthru (bool): (default=False) When enabled the commands stdout and stderr are directory forwarded to the attached terminal. The results will not be included in the CommandOutput, so that (None, None, <returncode>) is returned. """ command = ["/usr/sbin/jexec", str(self.jid)] + command command_env = self.env for env_key, env_value in env.items(): command_env[env_key] = env_value stdout, stderr, returncode = self._exec_host_command( command, env=command_env, passthru=passthru ) return stdout, stderr, returncode def passthru( self, command: typing.List[str], env: typing.Optional[typing.Dict[str, str]]=None ) -> libioc.helpers.CommandOutput: """ Execute a command in a started jail and passthrough STDIN and STDOUT. 
@property
def devfs_ruleset(self) -> libioc.DevfsRules.DevfsRuleset:
    """
    Return the number of the jail's devfs ruleset.

    When a new combination of the base ruleset specified in
    jail.config["devfs_ruleset"] and rules automatically added by iocage
    appears, the according rule is automatically created and added to
    the /etc/devfs.rules file on the host.

    Users may reference a rule by numeric identifier or name. These
    numbers are automatically selected, so it's advisable to use names.
    """
    # the configured ruleset may be referenced by number or by name
    try:
        configured_devfs_ruleset = self.host.devfs.find_by_number(
            int(self.config["devfs_ruleset"])
        )
    except ValueError:
        configured_devfs_ruleset = self.host.devfs.find_by_name(
            self.config["devfs_ruleset"]
        )

    devfs_ruleset = libioc.DevfsRules.DevfsRuleset()
    devfs_ruleset.clone(configured_devfs_ruleset)

    if self._dhcp_enabled is True:
        devfs_ruleset.append("add path 'bpf*' unhide")

    # bug fix: _allow_mount_zfs returns an int, so the previous
    # string comparison (== "1") was always False and the zfs device
    # was never unhidden
    if self._allow_mount_zfs == 1:
        devfs_ruleset.append("add path zfs unhide")

    if self.config["jail_zfs"] is True:
        unhidden_parents: typing.Set[str] = set()
        shared_datasets = self._zfs_share_storage.get_zfs_datasets()
        if len(shared_datasets) > 0:
            devfs_ruleset.append("add path zvol unhide")
            for shared_dataset in shared_datasets:
                current_dataset_name = "zvol"
                for fragment in shared_dataset.name.split("/"):
                    current_dataset_name += f"/{fragment}"
                    # unhide each parent dataset node only once
                    if current_dataset_name in unhidden_parents:
                        continue
                    unhidden_parents.add(current_dataset_name)
                    devfs_ruleset.append(
                        f"add path {current_dataset_name} unhide"
                    )
                    devfs_ruleset.append(
                        f"add path {current_dataset_name}/* unhide"
                    )

    if self.config["allow_vmm"] is True:
        devfs_ruleset.append("add path vmm unhide")
        devfs_ruleset.append("add path vmm/* unhide")
        devfs_ruleset.append("add path nmdm* unhide")

    # create if the final rule combination does not exist as ruleset
    if devfs_ruleset not in self.host.devfs:
        self.logger.verbose("New devfs ruleset combination")
        # note: name and number of devfs_ruleset are both None
        new_ruleset_number = self.host.devfs.new_ruleset(devfs_ruleset)
        self.host.devfs.save()
        return new_ruleset_number
    else:
        ruleset_line_position = self.host.devfs.index(devfs_ruleset)
        return self.host.devfs[ruleset_line_position].number
value: str jail_param_args: typing.List[str] = [] for sysctl_name, sysctl in libioc.JailParams.JailParams().items(): if sysctl.ctl_type == freebsd_sysctl.types.NODE: # skip NODE continue if sysctl_name == "security.jail.param.devfs_ruleset": value = str(self.devfs_ruleset) elif sysctl_name == "security.jail.param.path": value = self.root_dataset.mountpoint elif sysctl_name == "security.jail.param.name": value = self.identifier elif sysctl_name == "security.jail.param.allow.mount.zfs": value = str(self._allow_mount_zfs) elif sysctl_name == "security.jail.param.vnet": if vnet is False: # vnet is only used when explicitly enabled # (friendly to Kernels without VIMAGE support) continue value = "vnet" elif vnet and sysctl_name.startswith("security.jail.param.ip"): continue else: config_property_name = sysctl.iocage_name if self.config._is_known_property(config_property_name): value = config[config_property_name] else: continue sysctl.value = value jail_param_args.append(str(sysctl)) jail_args = [ f"exec.timeout={self._get_value('exec_timeout')}", f"stop.timeout={self._get_value('stop_timeout')}", f"exec.prestart=\"{self.get_hook_script_path('prestart')}\"", f"exec.prestop=\"{self.get_hook_script_path('prestop')}\"", f"exec.poststop=\"{self.get_hook_script_path('poststop')}\"", f"exec.jail_user={self._get_value('exec_jail_user')}", f"mount.fstab={self.fstab.path}", f"mount.devfs={self._get_value('mount_devfs')}", "allow.dying" ] return jail_param_args + jail_args def _launch_persistent_jail( self, passthru: bool ) -> libioc.helpers.CommandOutput: command = self.__get_launch_command(self._launch_args + [ "persist", f"exec.poststart=\"{self.get_hook_script_path('poststart')}\"" ]) stdout, stderr, returncode = self._exec_host_command( command=command, passthru=passthru, env=self.env ) if returncode > 0: self.logger.verbose( f"Jail '{self.humanreadable_name}' was not started" ) return stdout, stderr, returncode self.state.query() self.logger.verbose( f"Jail 
'{self.humanreadable_name}' started with JID {self.jid}" ) return stdout, stderr, returncode def _exec_host_command( self, command: typing.List[str], passthru: bool, env: typing.Optional[typing.Dict[str, str]]=None ) -> libioc.helpers.CommandOutput: try: if passthru is True: return libioc.helpers.exec_passthru( command, logger=self.logger, env=env ) else: exec_events = libioc.helpers.exec_generator( command, logger=self.logger, env=env ) try: while True: self.logger.spam( next(exec_events).decode("UTF-8"), indent=1 ) except StopIteration as return_statement: output: libioc.helpers.CommandOutput output = return_statement.value # noqa: T484 return output except (KeyboardInterrupt, SystemExit): raise libioc.errors.JailExecutionAborted( jail=self, logger=None ) def _launch_single_command_jail( self, jail_command: str, passthru: bool ) -> libioc.helpers.CommandOutput: command = self.__get_launch_command(self._launch_args + [ "nopersist", f"exec.poststart=\"{self.get_hook_script_path('host_command')}\"", "command=/usr/bin/true" ]) _identifier = str(shlex.quote(self.identifier)) _jls_command = f"/usr/sbin/jls -j {_identifier} jid" self._write_hook_script("host_command", "\n".join( [ f"IOC_JID=$({_jls_command} 2>&1 || echo -1)", "set -e", f"/bin/sh {self.get_hook_script_path('created')}", ( f"/usr/sbin/jexec {self.identifier} " f"{self._relative_hook_script_dir}/command.sh" " 2>&1" ), f"/bin/sh {self.get_hook_script_path('poststop')}" ] )) _ipfw_enabled = self.host.ipfw_enabled self._write_hook_script("command", "\n".join( (["set +e", "service ipfw onestop"] if _ipfw_enabled else []) + [ "set -e", f". {self._relative_hook_script_dir}/start.sh", jail_command, ] )) stdout, stderr, returncode = self._exec_host_command( command=command, passthru=passthru, env=self.env ) if returncode > 0: message = f"Jail {self.humanreadable_name} command failed." else: message = f"Jail {self.humanreadable_name} command finished." 
self.logger.verbose(message) return stdout, stderr, returncode def _get_value(self, key: str) -> str: """Return jail command consumable config value string.""" return str(libioc.helpers.to_string( self.config[key], true="1", false="0", none="" )) @property def networks(self) -> typing.List[libioc.Network.Network]: """Return the list of a jails configured networks.""" networks = [] nics = self.config["interfaces"] if nics is None: return [] for nic in nics: bridge = nics[nic] try: ipv4_addresses = self.config["ip4_addr"][nic] except (KeyError, TypeError): ipv4_addresses = [] try: ipv6_addresses = self.config["ip6_addr"][nic] except (KeyError, TypeError): ipv6_addresses = [] net = libioc.Network.Network( jail=self, nic=nic, ipv4_addresses=ipv4_addresses, ipv6_addresses=ipv6_addresses, bridge=bridge, logger=self.logger ) networks.append(net) return networks def _write_hook_script(self, hook_name: str, command_string: str) -> None: file = self.get_hook_script_path(hook_name) existed = os.path.isfile(file) if hook_name in ["created", "poststart", "prestop"]: _identifier = str(shlex.quote(self.identifier)) _jls_command = f"/usr/sbin/jls -j {_identifier} jid" command_string = ( "IOC_JID=" f"$({_jls_command} 2>&1 || echo -1)" "\n" + command_string ) if hook_name == "poststop": command_string = ( "[ -f \"$(dirname $0)/.env\" ] && " ". 
\"$(dirname $0)/.env\"" "\n" ) + command_string with open(file, "w") as f: f.write("\n".join([ "#!/bin/sh", command_string ])) if existed is False: shutil.chown(file, "root", "wheel") os.chmod(file, 0o755) # nosec: executable script @property def launch_script_dir(self) -> str: """Return the launch-scripts directory path of the jail.""" return f"{self.jail.dataset.mountpoint}/launch-scripts" @property def script_env_path(self) -> str: """Return the absolute path to the jail script env file.""" return f"{self.launch_script_dir}/.env" def get_hook_script_path(self, hook_name: str) -> str: """Return the absolute path to the hook script file.""" return f"{self.jail.launch_script_dir}/{hook_name}.sh" def _start_vimage_network(self) -> typing.Tuple[ 'libioc.Network.CreatedCommandList', 'libioc.Network.JailCommandList' ]: self.logger.debug("Starting VNET/VIMAGE") created: typing.List[str] = [] start: typing.List[str] = [] for network in self.networks: _created, _start = network.setup() created += _created start += _start return created, start def _stop_network(self) -> typing.List[str]: if self.config["vnet"]: return self._stop_vimage_network() else: return self._stop_non_vimage_network() def _stop_non_vimage_network(self) -> typing.List[str]: commands: typing.List[str] = [] for protocol in (4, 6,): config_value = self.config[f"ip{protocol}_addr"] if config_value is None: return commands for nic, addresses in config_value.items(): if addresses is None: continue for address in addresses: if isinstance(address, str): # skip DHCP and ACCEPT_RTADV continue inet = "inet" if (protocol == 4) else "inet6" commands.append( f"/sbin/ifconfig {nic} {inet} {address} remove" ) return commands def _stop_vimage_network(self) -> typing.List[str]: commands: typing.List[str] = [] for network in self.networks: commands += network.teardown() return commands def _configure_localhost_commands(self) -> typing.List[str]: return ["/sbin/ifconfig lo0 localhost"] def 
def require_jail_is_template(self, log_errors: bool=True) -> None:
    """Raise JailNotTemplate exception if the jail is not a template."""
    if self.config['template'] is False:
        raise libioc.errors.JailNotTemplate(
            jail=self,
            logger=(self.logger if log_errors else None)
        )
hostid_check_ok(self) -> bool: """Return true if the hostid check passes.""" if self.config["hostid_strict_check"] is False: self.logger.spam("hostid_strict_check is disabled") return True jail_hostid = self.config["hostid"] if (jail_hostid is None) or (jail_hostid == self.host.id): return True return False def require_storage_backend(self, log_errors: bool=True) -> None: """Raise if the jail was not initialized with a storage backend.""" if self.storage_backend is None: raise Exception("The jail has no storage backend.") def require_jail_not_template(self, log_errors: bool=True) -> None: """Raise JailIsTemplate exception if the jail is a template.""" if self.config['template'] is True: raise libioc.errors.JailIsTemplate( jail=self, logger=(self.logger if log_errors else None) ) def require_jail_not_existing(self, log_errors: bool=True) -> None: """Raise JailAlreadyExists exception if the jail already exists.""" if self.exists: raise libioc.errors.JailAlreadyExists( jail=self, logger=(self.logger if log_errors else None) ) def require_jail_existing(self, log_errors: bool=True) -> None: """Raise JailDoesNotExist exception if the jail does not exist.""" if not self.exists: raise libioc.errors.JailDoesNotExist( jail=self, logger=(self.logger if log_errors else None) ) def require_jail_stopped(self, log_errors: bool=True) -> None: """Raise JailAlreadyRunning exception if the jail is running.""" if self.running is not False: raise libioc.errors.JailAlreadyRunning( jail=self, logger=(self.logger if log_errors else None) ) def require_jail_running(self, log_errors: bool=True) -> None: """Raise JailNotRunning exception if the jail is stopped.""" if not self.running: raise libioc.errors.JailNotRunning( jail=self, logger=(self.logger if log_errors else None) ) def _teardown_mounts(self) -> typing.List[str]: commands: typing.List[str] = [] fstab_destinations = [line["destination"] for line in self.fstab] system_mountpoints = list(filter( os.path.isdir, map( 
self._get_absolute_path_from_jail_asset, [ "/dev/fd", "/dev", "/proc", "/root/compat/linux/proc", "/root/etcupdate", "/root/usr/ports", "/root/usr/src", "/tmp" # nosec: B108 ] ) )) mountpoints = fstab_destinations + system_mountpoints commands.append(" ".join(libioc.helpers.umount_command( mountpoints, force=True, ignore_error=True ))) commands.append(" ".join(libioc.helpers.umount_command( ["-a", "-F", self.fstab.path], force=True, ignore_error=True ))) if self.config.legacy is True: commands.append(" | ".join([ "mount -t nullfs", "sed -r 's/(.+) on (.+) \\(nullfs, .+\\)$/\\2/'", f"grep '^{self.root_dataset.mountpoint}/'", "xargs umount" ])) return commands def _get_absolute_path_from_jail_asset( self, value: str ) -> libioc.Types.AbsolutePath: return libioc.Types.AbsolutePath(f"{self.root_path}{value}") def _resolve_name(self, text: str) -> str: if (text is None) or (len(text) == 0): raise libioc.errors.JailNotSupplied(logger=self.logger) resource_selector = libioc.ResourceSelector.ResourceSelector( name=text, logger=self.logger ) root_datasets = resource_selector.filter_datasets(self.host.datasets) for datasets_key, datasets in root_datasets.items(): for dataset in list(datasets.jails.children): dataset_name = str( dataset.name[(len(datasets.jails.name) + 1):] ) humanreadable_name = libioc.helpers.to_humanreadable_name( dataset_name ) possible_names = [dataset_name, humanreadable_name] if resource_selector.name in possible_names: return dataset_name raise libioc.errors.JailNotFound(text, logger=self.logger) @property def name(self) -> str: """Return the configured jail id.""" return str(self.config["id"]) @property def full_name(self) -> str: """ Return the full identifier of a jail. When more than one root dataset is managed by iocage, the full source and name are returned. Otherwise just the name. For example `mydataset/jailname` or just `jailname`. 
""" if len(self.host.datasets) > 1: return f"{self.source}/{self.name}" else: return self.name @property def humanreadable_name(self) -> str: """ Return the human-readable identifier to print in logs and CLI output. Whenever a Jail is found to have a UUID as identifier, a shortened string of the first 8 characters is returned """ try: return str(libioc.helpers.to_humanreadable_name(self.name)) except KeyError: raise libioc.errors.JailUnknownIdentifier( logger=self.logger ) @property def stopped(self) -> bool: """Return True if a jail is stopped.""" return self.running is not True @property def running(self) -> bool: """Return True if a jail is running.""" return self.jid is not None @property def jid(self) -> typing.Optional[int]: """Return a jails JID if it is running or None.""" if "_state" not in object.__dir__(self): # force state init when jid was requested self._init_state() try: return int(self.state["jid"]) except (KeyError, TypeError): return None @property def env(self) -> typing.Dict[str, str]: """Return the environment variables for hook scripts.""" jail_env: typing.Dict[str, str] if self.config["exec_clean"] is False: jail_env = os.environ.copy() else: jail_env = {} for prop in self.config.all_properties: prop_name = f"IOC_{prop.replace('.', '_').upper()}" jail_env[prop_name] = str(self.config[prop]) jail_env["IOC_JAIL_PATH"] = self.root_dataset.mountpoint jail_env["IOC_JID"] = str(self.jid) jail_env["PATH"] = ":".join(( "/sbin", "/bin", "/usr/sbin", "/usr/bin", "/usr/local/sbin", "/usr/local/bin", )) return jail_env @property def identifier(self) -> str: """Return the jail id used in snapshots, jls, etc.""" config = object.__getattribute__(self, 'config') return f"{self.source}-{config['id']}" @property def release(self) -> 'libioc.Release.ReleaseGenerator': """Return the libioc.Release instance linked with the jail.""" return libioc.Release.ReleaseGenerator( name=self.config["release"], root_datasets_name=self.root_datasets_name, logger=self.logger, 
def __eq__(self, other: typing.Any) -> bool:
    """
    Compare two Jails by their name.

    The jail is identified by its full name, including the iocage root
    dataset name in case there is more than one enabled on the host.
    """
    # bug fix: the guard was inverted (`if isinstance(...): return False`),
    # which made two jails always compare unequal and let non-jail
    # objects fall through to `other.full_name` (AttributeError)
    if not isinstance(other, JailGenerator):
        return False
    return (self.full_name == other.full_name) is True
By passing a list of already started jails to the start command, iocage does not need to query their state, because they are known to be running already. This argument is internally used when starting a jails dependants recursively. start_dependant_jails (bool): When disabled, no dependant jails will be started. **temporary_config_override (dict(str, any)): Other named arguments temporary override JailConfig properties. For example: jail = libioc.JailGenerator("myjail") events = jail.fork_exec("ifconfig", vnet=False) print(list(events)) """ events = JailGenerator.fork_exec( self, command=command, passthru=passthru, event_scope=event_scope, dependant_jails_seen=dependant_jails_seen, start_dependant_jails=start_dependant_jails, **temporary_config_override ) for event in events: if isinstance(event, libioc.events.JailLaunch) and event.done: return str(event.stdout)
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT

"""
Data layer on top of PSQL using sqlalch
"""

import enum
import logging
import os
from contextlib import contextmanager
from datetime import datetime, timedelta
from typing import (
    Dict,
    Iterable,
    List,
    Optional,
    TYPE_CHECKING,
    Tuple,
    Type,
    Union,
)
from urllib.parse import urlparse

from sqlalchemy import (
    Boolean,
    Column,
    DateTime,
    Enum,
    ForeignKey,
    Integer,
    JSON,
    String,
    Text,
    create_engine,
    desc,
    func,
    null,
    case,
)
from sqlalchemy.dialects.postgresql import array as psql_array
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, relationship, scoped_session, sessionmaker
from sqlalchemy.types import ARRAY

from packit.config import JobConfigTriggerType
from packit.exceptions import PackitException

from packit_service.constants import ALLOWLIST_CONSTANTS

logger = logging.getLogger(__name__)

# SQLAlchemy session, get it with `get_sa_session`
session_instance = None


def get_pg_url() -> str:
    """Create a PostgreSQL connection string from the POSTGRESQL_* env vars.

    Host defaults to "postgres" and port to "5432" when unset.
    """
    # FIX: use single quotes inside the f-string expressions — reusing
    # double quotes inside a double-quoted f-string is a SyntaxError on
    # every Python before 3.12.
    return (
        f"postgresql+psycopg2://{os.getenv('POSTGRESQL_USER')}"
        f":{os.getenv('POSTGRESQL_PASSWORD')}@{os.getenv('POSTGRESQL_HOST', 'postgres')}"
        f":{os.getenv('POSTGRESQL_PORT', '5432')}/{os.getenv('POSTGRESQL_DATABASE')}"
    )


engine = create_engine(get_pg_url())
ScopedSession = scoped_session(sessionmaker(bind=engine))


@contextmanager
def get_sa_session() -> Session:
    """Yield a scoped SQLAlchemy session.

    Commits on clean exit; on any exception it logs, rolls back and
    re-raises so the caller still sees the failure.
    """
    session = ScopedSession()
    try:
        yield session
        session.commit()
    except Exception as ex:
        logger.warning(f"Exception while working with database: {ex!r}")
        session.rollback()
        raise


def optional_time(
    datetime_object: Union[datetime, None], fmt: str = "%d/%m/%Y %H:%M:%S"
) -> Union[str, None]:
    """
    Returns a formatted date-time string if argument is a datetime object.

    Args:
        datetime_object: date-time to be converted to string
        fmt: format string to be used to produce the string.

            Defaults to `"%d/%m/%Y %H:%M:%S"`.

    Returns:
        Formatted date-time or `None` if no datetime is provided.
    """
    if datetime_object is None:
        return None
    return datetime_object.strftime(fmt)


def optional_timestamp(datetime_object: Optional[datetime]) -> Optional[int]:
    """
    Returns a UNIX timestamp if argument is a datetime object.

    Args:
        datetime_object: Date-time to be converted to timestamp.

    Returns:
        UNIX timestamp or `None` if no datetime object is provided.
    """
    if datetime_object is None:
        return None
    return int(datetime_object.timestamp())


# https://github.com/python/mypy/issues/2477#issuecomment-313984522 ^_^
if TYPE_CHECKING:
    Base = object
else:
    Base = declarative_base()


class JobTriggerModelType(str, enum.Enum):
    """Discriminator for the table a JobTriggerModel row points into."""

    pull_request = "pull_request"
    branch_push = "branch_push"
    release = "release"
    issue = "issue"


class BuildsAndTestsConnector:
    """
    Abstract class that is inherited by trigger models
    to share methods for accessing build/test models..
    """

    # Provided by the concrete trigger model (SQLAlchemy PK column).
    id: int
    # Provided by the concrete trigger model (class-level constant).
    job_trigger_model_type: JobTriggerModelType

    def get_runs(self) -> List["PipelineModel"]:
        """Return all pipelines attached to this trigger.

        Raises:
            PackitException: when more than one JobTriggerModel row exists
                for this (type, trigger_id) pair — that indicates corrupt
                data, not a user error.
        """
        with get_sa_session() as session:
            trigger_list = (
                session.query(JobTriggerModel)
                .filter_by(type=self.job_trigger_model_type, trigger_id=self.id)
                .all()
            )
            if len(trigger_list) > 1:
                # FIX: added the missing space between the concatenated
                # f-string fragments ("…typeand id=…" previously).
                msg = (
                    f"There are multiple run models for type {self.job_trigger_model_type}"
                    f" and id={self.id}."
                )
                logger.error(msg)
                raise PackitException(msg)
            return trigger_list[0].runs if trigger_list else []

    def _get_run_item(
        self, model_type: Type["AbstractBuildTestDbType"]
    ) -> List["AbstractBuildTestDbType"]:
        """Collect the distinct build/test models of one kind from all runs."""
        runs = self.get_runs()
        models = []

        if model_type == CoprBuildTargetModel:
            models = [run.copr_build for run in runs]

        if model_type == KojiBuildTargetModel:
            models = [run.koji_build for run in runs]

        if model_type == SRPMBuildModel:
            models = [run.srpm_build for run in runs]

        if model_type == TFTTestRunTargetModel:
            models = [run.test_run for run in runs]

        # Deduplicate and drop runs that have no model of this kind.
        return list({model for model in models if model is not None})

    def get_copr_builds(self):
        return self._get_run_item(model_type=CoprBuildTargetModel)

    def get_koji_builds(self):
        return self._get_run_item(model_type=KojiBuildTargetModel)

    def get_srpm_builds(self):
        return self._get_run_item(model_type=SRPMBuildModel)

    def get_test_runs(self):
        return self._get_run_item(model_type=TFTTestRunTargetModel)


class ProjectAndTriggersConnector:
    """
    Abstract class that is inherited by build/test models
    to share methods for accessing project and trigger models.
    """

    # Provided by the concrete build/test model (SQLAlchemy relationship).
    runs: Optional[List["PipelineModel"]]

    def get_job_trigger_model(self) -> Optional["JobTriggerModel"]:
        # All runs of one build share the same trigger, so the first is enough.
        if not self.runs:
            return None
        return self.runs[0].job_trigger

    def get_trigger_object(self) -> Optional["AbstractTriggerDbType"]:
        job_trigger = self.get_job_trigger_model()
        if not job_trigger:
            return None
        return job_trigger.get_trigger_object()

    def get_project(self) -> Optional["GitProjectModel"]:
        trigger_object = self.get_trigger_object()
        if not trigger_object:
            return None
        return trigger_object.project

    def get_pr_id(self) -> Optional[int]:
        trigger_object = self.get_trigger_object()
        if isinstance(trigger_object, PullRequestModel):
            return trigger_object.pr_id
        return None

    def get_issue_id(self) -> Optional[int]:
        trigger_object = self.get_trigger_object()
        if not isinstance(trigger_object, IssueModel):
            return None
        return trigger_object.issue_id

    def get_branch_name(self) -> Optional[str]:
        trigger_object = self.get_trigger_object()
        if isinstance(trigger_object, GitBranchModel):
            return trigger_object.name
        return None

    def get_release_tag(self) -> Optional[str]:
        trigger_object = self.get_trigger_object()
        if isinstance(trigger_object, ProjectReleaseModel):
            return trigger_object.tag_name
        return None


class GitProjectModel(Base):
    __tablename__ = "git_projects"
    id = Column(Integer, primary_key=True)
    # github.com/NAMESPACE/REPO_NAME
    # git.centos.org/NAMESPACE/REPO_NAME
    namespace = Column(String, index=True)
    repo_name = Column(String, index=True)
    pull_requests = relationship("PullRequestModel", back_populates="project")
    branches = relationship("GitBranchModel", back_populates="project")
    releases = relationship("ProjectReleaseModel", back_populates="project")
    issues = relationship("IssueModel", back_populates="project")
    project_authentication_issue = relationship(
        "ProjectAuthenticationIssueModel", back_populates="project"
    )

    # Git URL of the repo
    # Example: https://github.com/packit/hello-world.git
    https_url = Column(String)
    project_url = Column(String)
    instance_url = Column(String, nullable=False)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Derived column: forge hostname, e.g. "github.com".
        self.instance_url = urlparse(self.project_url).hostname

    @classmethod
    def get_or_create(
        cls, namespace: str, repo_name: str, project_url: str
    ) -> "GitProjectModel":
        with get_sa_session() as session:
            project = (
                session.query(GitProjectModel)
                .filter_by(
                    namespace=namespace, repo_name=repo_name, project_url=project_url
                )
                .first()
            )
            if not project:
                project = cls(
                    repo_name=repo_name, namespace=namespace, project_url=project_url
                )
                session.add(project)
            return project

    @classmethod
    def get_projects(cls, first: int, last: int) -> Iterable["GitProjectModel"]:
        with get_sa_session() as session:
            return (
                session.query(GitProjectModel)
                .order_by(GitProjectModel.namespace)
                .slice(first, last)
            )

    @classmethod
    def get_forge(
        cls, first: int, last: int, forge: str
    ) -> Iterable["GitProjectModel"]:
        """Return projects of given forge"""
        with get_sa_session() as session:
            return (
                session.query(GitProjectModel)
                .filter_by(instance_url=forge)
                .order_by(GitProjectModel.namespace)
                .slice(first, last)
            )

    @classmethod
    def get_namespace(cls, forge: str, namespace: str) -> Iterable["GitProjectModel"]:
        """Return projects of given forge and namespace"""
        # Forge filtering happens in Python (hostname parsed from the URL),
        # only the namespace filter is pushed to the database.
        with get_sa_session() as session:
            projects = (
                session.query(GitProjectModel).filter_by(namespace=namespace).all()
            )
            matched_projects = []
            for project in projects:
                forge_domain = urlparse(project.project_url).hostname
                if forge == forge_domain:
                    matched_projects.append(project)
            return matched_projects

    @classmethod
    def get_project(
        cls, forge: str, namespace: str, repo_name: str
    ) -> Optional["GitProjectModel"]:
        """Return one project which matches said criteria"""
        with get_sa_session() as session:
            project = (
                session.query(cls)
                .filter_by(instance_url=forge, namespace=namespace, repo_name=repo_name)
                .one_or_none()
            )
            return project

    @classmethod
    def get_project_prs(
        cls, first: int, last: int, forge: str, namespace: str, repo_name: str
    ) -> Iterable["PullRequestModel"]:
        with get_sa_session() as session:
            return (
                session.query(PullRequestModel)
                .join(GitProjectModel)
                .filter(
                    PullRequestModel.project_id == GitProjectModel.id,
                    GitProjectModel.instance_url == forge,
                    GitProjectModel.namespace == namespace,
                    GitProjectModel.repo_name == repo_name,
                )
                .order_by(desc(PullRequestModel.pr_id))
                .slice(first, last)
            )

    @classmethod
    def get_project_issues(
        cls, forge: str, namespace: str, repo_name: str
    ) -> Optional[Iterable["IssueModel"]]:
        with get_sa_session() as session:
            issues = (
                session.query(IssueModel)
                .join(GitProjectModel)
                .filter(
                    IssueModel.project_id == GitProjectModel.id,
                    GitProjectModel.instance_url == forge,
                    GitProjectModel.namespace == namespace,
                    GitProjectModel.repo_name == repo_name,
                )
                .all()
            )
            return issues

    @classmethod
    def get_project_branches(
        cls, forge: str, namespace: str, repo_name: str
    ) -> Optional[Iterable["GitBranchModel"]]:
        with get_sa_session() as session:
            branches = (
                session.query(GitBranchModel)
                .join(GitProjectModel)
                .filter(
                    GitBranchModel.project_id == GitProjectModel.id,
                    GitProjectModel.instance_url == forge,
                    GitProjectModel.namespace == namespace,
                    GitProjectModel.repo_name == repo_name,
                )
                .all()
            )
            return branches

    @classmethod
    def get_project_releases(
        cls, forge: str, namespace: str, repo_name: str
    ) -> Optional[Iterable["ProjectReleaseModel"]]:
        with get_sa_session() as session:
            releases = (
                session.query(ProjectReleaseModel)
                .join(GitProjectModel)
                .filter(
                    ProjectReleaseModel.project_id == GitProjectModel.id,
                    GitProjectModel.instance_url == forge,
                    GitProjectModel.namespace == namespace,
                    GitProjectModel.repo_name == repo_name,
                )
                .all()
            )
            return releases

    def __repr__(self):
        return (
            f"GitProjectModel(name={self.namespace}/{self.repo_name}, "
            f"project_url='{self.project_url}')"
        )


class PullRequestModel(BuildsAndTestsConnector, Base):
    __tablename__ = "pull_requests"
    id = Column(Integer, primary_key=True)  # our database PK
    # GitHub PR ID
    # this is not our PK b/c:
    # 1) we don't control it
    # 2) we want sensible auto-incremented ID, not random numbers
    # 3) it's not unique across projects obviously, so why am I even writing this?
    pr_id = Column(Integer, index=True)
    project_id = Column(Integer, ForeignKey("git_projects.id"))
    project = relationship("GitProjectModel", back_populates="pull_requests")
    # CentOS Pagure only
    bugzilla = relationship("BugzillaModel", back_populates="pull_request")

    job_config_trigger_type = JobConfigTriggerType.pull_request
    job_trigger_model_type = JobTriggerModelType.pull_request

    @classmethod
    def get_or_create(
        cls, pr_id: int, namespace: str, repo_name: str, project_url: str
    ) -> "PullRequestModel":
        with get_sa_session() as session:
            project = GitProjectModel.get_or_create(
                namespace=namespace, repo_name=repo_name, project_url=project_url
            )
            pr = (
                session.query(PullRequestModel)
                .filter_by(pr_id=pr_id, project_id=project.id)
                .first()
            )
            if not pr:
                pr = PullRequestModel()
                pr.pr_id = pr_id
                pr.project_id = project.id
                session.add(pr)
            return pr

    @classmethod
    def get_by_id(cls, id_: int) -> Optional["PullRequestModel"]:
        with get_sa_session() as session:
            return session.query(PullRequestModel).filter_by(id=id_).first()

    def __repr__(self):
        return f"PullRequestModel(pr_id={self.pr_id}, project={self.project})"


class IssueModel(BuildsAndTestsConnector, Base):
    __tablename__ = "project_issues"
    id = Column(Integer, primary_key=True)  # our database PK
    issue_id = Column(Integer, index=True)
    project_id = Column(Integer, ForeignKey("git_projects.id"))
    project = relationship("GitProjectModel", back_populates="issues")
    # TODO: Fix this hardcoding! This is only to make propose-downstream work!
job_config_trigger_type = JobConfigTriggerType.release job_trigger_model_type = JobTriggerModelType.issue @classmethod def get_or_create( cls, issue_id: int, namespace: str, repo_name: str, project_url: str ) -> "IssueModel": with get_sa_session() as session: project = GitProjectModel.get_or_create( namespace=namespace, repo_name=repo_name, project_url=project_url ) issue = ( session.query(IssueModel) .filter_by(issue_id=issue_id, project_id=project.id) .first() ) if not issue: issue = IssueModel() issue.issue_id = issue_id issue.project_id = project.id session.add(issue) return issue @classmethod def get_by_id(cls, id_: int) -> Optional["IssueModel"]: with get_sa_session() as session: return session.query(IssueModel).filter_by(id=id_).first() def __repr__(self): return f"IssueModel(id={self.issue_id}, project={self.project})" class GitBranchModel(BuildsAndTestsConnector, Base): __tablename__ = "git_branches" id = Column(Integer, primary_key=True) # our database PK name = Column(String) project_id = Column(Integer, ForeignKey("git_projects.id")) project = relationship("GitProjectModel", back_populates="branches") job_config_trigger_type = JobConfigTriggerType.commit job_trigger_model_type = JobTriggerModelType.branch_push @classmethod def get_or_create( cls, branch_name: str, namespace: str, repo_name: str, project_url: str ) -> "GitBranchModel": with get_sa_session() as session: project = GitProjectModel.get_or_create( namespace=namespace, repo_name=repo_name, project_url=project_url ) git_branch = ( session.query(GitBranchModel) .filter_by(name=branch_name, project_id=project.id) .first() ) if not git_branch: git_branch = GitBranchModel() git_branch.name = branch_name git_branch.project_id = project.id session.add(git_branch) return git_branch @classmethod def get_by_id(cls, id_: int) -> Optional["GitBranchModel"]: with get_sa_session() as session: return session.query(GitBranchModel).filter_by(id=id_).first() def __repr__(self): return 
f"GitBranchModel(name={self.name}, project={self.project})" class BugzillaModel(Base): __tablename__ = "bugzillas" id = Column(Integer, primary_key=True) bug_id = Column(Integer, index=True) bug_url = Column(String) pull_request_id = Column(Integer, ForeignKey("pull_requests.id")) pull_request = relationship("PullRequestModel", back_populates="bugzilla") @classmethod def get_or_create( cls, pr_id: int, namespace: str, repo_name: str, project_url: str, bug_id: int = None, bug_url: str = None, ) -> "BugzillaModel": with get_sa_session() as session: pull_request = PullRequestModel.get_or_create( pr_id=pr_id, namespace=namespace, repo_name=repo_name, project_url=project_url, ) bugzilla = ( session.query(BugzillaModel) .filter_by(pull_request_id=pull_request.id) .first() ) if not bugzilla and bug_id and bug_url: bugzilla = BugzillaModel() bugzilla.bug_id = bug_id bugzilla.bug_url = bug_url bugzilla.pull_request_id = pull_request.id session.add(bugzilla) return bugzilla @classmethod def get_by_pr( cls, pr_id: int, namespace: str, repo_name: str, project_url: str, ) -> Optional["BugzillaModel"]: return cls.get_or_create( pr_id=pr_id, namespace=namespace, repo_name=repo_name, project_url=project_url, ) def __repr__(self): return f"BugzillaModel(bug_id={self.bug_id}, bug_url={self.bug_url})" class ProjectReleaseModel(Base): __tablename__ = "project_releases" id = Column(Integer, primary_key=True) # our database PK tag_name = Column(String) commit_hash = Column(String) project_id = Column(Integer, ForeignKey("git_projects.id")) project = relationship("GitProjectModel", back_populates="releases") job_config_trigger_type = JobConfigTriggerType.release job_trigger_model_type = JobTriggerModelType.release @classmethod def get_or_create( cls, tag_name: str, namespace: str, repo_name: str, project_url: str, commit_hash: Optional[str] = None, ) -> "ProjectReleaseModel": with get_sa_session() as session: project = GitProjectModel.get_or_create( namespace=namespace, 
repo_name=repo_name, project_url=project_url ) project_release = ( session.query(ProjectReleaseModel) .filter_by(tag_name=tag_name, project_id=project.id) .first() ) if not project_release: project_release = ProjectReleaseModel() project_release.tag_name = tag_name project_release.project = project project_release.commit_hash = commit_hash session.add(project_release) return project_release @classmethod def get_by_id(cls, id_: int) -> Optional["ProjectReleaseModel"]: with get_sa_session() as session: return session.query(ProjectReleaseModel).filter_by(id=id_).first() def __repr__(self): return ( f"ProjectReleaseModel(" f"tag_name={self.tag_name}, " f"project={self.project})" ) AbstractTriggerDbType = Union[ PullRequestModel, ProjectReleaseModel, GitBranchModel, IssueModel, ] MODEL_FOR_TRIGGER: Dict[JobTriggerModelType, Type[AbstractTriggerDbType]] = { JobTriggerModelType.pull_request: PullRequestModel, JobTriggerModelType.branch_push: GitBranchModel, JobTriggerModelType.release: ProjectReleaseModel, JobTriggerModelType.issue: IssueModel, } class JobTriggerModel(Base): """ Model representing a trigger of some packit task. It connects PipelineModel (and built/test models via that model) with models like PullRequestModel, GitBranchModel or ProjectReleaseModel. * It contains type and id of the other database_model. * We know table and id that we need to find in that table. * Each PipelineModel has to be connected to exactly one JobTriggerModel. * There can be multiple PipelineModels for one JobTriggerModel. (e.g. For each push to PR, there will be new PipelineModel, but same JobTriggerModel.) 
""" __tablename__ = "job_triggers" id = Column(Integer, primary_key=True) # our database PK type = Column(Enum(JobTriggerModelType)) trigger_id = Column(Integer) runs = relationship("PipelineModel", back_populates="job_trigger") @classmethod def get_or_create( cls, type: JobTriggerModelType, trigger_id: int ) -> "JobTriggerModel": with get_sa_session() as session: trigger = ( session.query(JobTriggerModel) .filter_by(type=type, trigger_id=trigger_id) .first() ) if not trigger: trigger = JobTriggerModel() trigger.type = type trigger.trigger_id = trigger_id session.add(trigger) return trigger @classmethod def get_by_id(cls, id_: int) -> "JobTriggerModel": with get_sa_session() as session: return session.query(JobTriggerModel).filter_by(id=id_).first() def get_trigger_object(self) -> Optional[AbstractTriggerDbType]: with get_sa_session() as session: return ( session.query(MODEL_FOR_TRIGGER[self.type]) .filter_by(id=self.trigger_id) .first() ) def __repr__(self): return f"JobTriggerModel(type={self.type}, trigger_id={self.trigger_id})" class PipelineModel(Base): """ Represents one pipeline. Connects JobTriggerModel (and triggers like PullRequestModel via that model) with build/test models like SRPMBuildModel, CoprBuildTargetModel, KojiBuildTargetModel, and TFTTestRunTargetModel. * One model of each build/test model can be connected. * Each build/test model can be connected to multiple PipelineModels (e.g. on retrigger). * Each PipelineModel has to be connected to exactly one JobTriggerModel. * There can be multiple PipelineModels for one JobTriggerModel. (e.g. For each push to PR, there will be new PipelineModel, but same JobTriggerModel.) 
""" __tablename__ = "pipelines" id = Column(Integer, primary_key=True) # our database PK # datetime.utcnow instead of datetime.utcnow() because its an argument to the function # so it will run when the model is initiated, not when the table is made datetime = Column(DateTime, default=datetime.utcnow) job_trigger_id = Column(Integer, ForeignKey("job_triggers.id")) job_trigger = relationship("JobTriggerModel", back_populates="runs") srpm_build_id = Column(Integer, ForeignKey("srpm_builds.id")) srpm_build = relationship("SRPMBuildModel", back_populates="runs") copr_build_id = Column(Integer, ForeignKey("copr_build_targets.id")) copr_build = relationship("CoprBuildTargetModel", back_populates="runs") koji_build_id = Column(Integer, ForeignKey("koji_build_targets.id")) koji_build = relationship("KojiBuildTargetModel", back_populates="runs") test_run_id = Column(Integer, ForeignKey("tft_test_run_targets.id")) test_run = relationship("TFTTestRunTargetModel", back_populates="runs") propose_downstream_run_id = Column( Integer, ForeignKey("propose_downstream_runs.id") ) propose_downstream_run = relationship( "ProposeDownstreamModel", back_populates="runs" ) @classmethod def create(cls, type: JobTriggerModelType, trigger_id: int) -> "PipelineModel": with get_sa_session() as session: run_model = PipelineModel() run_model.job_trigger = JobTriggerModel.get_or_create( type=type, trigger_id=trigger_id ) session.add(run_model) return run_model def get_trigger_object(self) -> AbstractTriggerDbType: return self.job_trigger.get_trigger_object() def __repr__(self): return f"PipelineModel(id={self.id}, datetime='{datetime}', job_trigger={self.job_trigger})" @classmethod def __query_merged_runs(cls, session): return session.query( func.min(PipelineModel.id).label("merged_id"), PipelineModel.srpm_build_id, func.array_agg(psql_array([PipelineModel.copr_build_id])).label( "copr_build_id" ), func.array_agg(psql_array([PipelineModel.koji_build_id])).label( "koji_build_id" ), 
func.array_agg(psql_array([PipelineModel.test_run_id])).label( "test_run_id" ), func.array_agg(psql_array([PipelineModel.propose_downstream_run_id])).label( "propose_downstream_run_id", ), ) @classmethod def get_merged_chroots(cls, first: int, last: int) -> Iterable["PipelineModel"]: with get_sa_session() as session: return ( cls.__query_merged_runs(session) .group_by( PipelineModel.srpm_build_id, case( [(PipelineModel.srpm_build_id.isnot(null()), 0)], else_=PipelineModel.id, ), ) .order_by(desc("merged_id")) .slice(first, last) ) @classmethod def get_merged_run(cls, first_id: int) -> Optional[Iterable["PipelineModel"]]: with get_sa_session() as session: return ( cls.__query_merged_runs(session) .filter( PipelineModel.id >= first_id, PipelineModel.id <= first_id + 100 ) .group_by( PipelineModel.srpm_build_id, case( [(PipelineModel.srpm_build_id.isnot(null()), 0)], else_=PipelineModel.id, ), ) .first() ) @classmethod def get_run(cls, id_: int) -> Optional["PipelineModel"]: with get_sa_session() as session: return session.query(PipelineModel).filter_by(id=id_).first() class CoprBuildTargetModel(ProjectAndTriggersConnector, Base): """ Representation of Copr build for one target. """ __tablename__ = "copr_build_targets" id = Column(Integer, primary_key=True) build_id = Column(String, index=True) # copr build id # commit sha of the PR (or a branch, release) we used for a build commit_sha = Column(String) # what's the build status? 
    status = Column(String)
    # chroot, but we use the word target in our docs
    target = Column(String)
    # URL to copr web ui for the particular build
    web_url = Column(String)
    # url to copr build logs
    build_logs_url = Column(String)
    # for monitoring: time when we set the status about accepted task
    task_accepted_time = Column(DateTime)
    # datetime.utcnow instead of datetime.utcnow() because its an argument to the function
    # so it will run when the copr build is initiated, not when the table is made
    build_submitted_time = Column(DateTime, default=datetime.utcnow)
    build_start_time = Column(DateTime)
    build_finished_time = Column(DateTime)

    # project name as shown in copr
    project_name = Column(String)
    owner = Column(String)

    # metadata for the build which didn't make it to schema yet
    # metadata is reserved to sqlalch
    data = Column(JSON)

    # info about built packages we get from Copr, e.g.
    # [
    #   {
    #       "arch": "noarch",
    #       "epoch": 0,
    #       "name": "python3-packit",
    #       "release": "1.20210930124525726166.main.0.g0b7b36b.fc36",
    #       "version": "0.38.0",
    #   }
    # ]
    built_packages = Column(JSON)

    runs = relationship("PipelineModel", back_populates="copr_build")

    # The setters below each open a session so the change is committed
    # immediately by get_sa_session's context-manager exit.
    def set_built_packages(self, built_packages):
        with get_sa_session() as session:
            self.built_packages = built_packages
            session.add(self)

    def set_start_time(self, start_time: datetime):
        with get_sa_session() as session:
            self.build_start_time = start_time
            session.add(self)

    def set_end_time(self, end_time: datetime):
        with get_sa_session() as session:
            self.build_finished_time = end_time
            session.add(self)

    def set_status(self, status: str):
        with get_sa_session() as session:
            self.status = status
            session.add(self)

    def set_build_logs_url(self, build_logs: str):
        with get_sa_session() as session:
            self.build_logs_url = build_logs
            session.add(self)

    def get_srpm_build(self) -> Optional["SRPMBuildModel"]:
        if not self.runs:
            return None
        # All SRPMBuild models for all the runs have to be same.
        return self.runs[0].srpm_build

    @classmethod
    def get_by_id(cls, id_: int) -> Optional["CoprBuildTargetModel"]:
        with get_sa_session() as session:
            return session.query(CoprBuildTargetModel).filter_by(id=id_).first()

    @classmethod
    def get_all(cls) -> Optional[Iterable["CoprBuildTargetModel"]]:
        with get_sa_session() as session:
            return (
                session.query(CoprBuildTargetModel)
                .order_by(desc(CoprBuildTargetModel.id))
                .all()
            )

    @classmethod
    def get_merged_chroots(
        cls, first: int, last: int
    ) -> Iterable["CoprBuildTargetModel"]:
        """Returns a list of unique build ids with merged status, chroots
        Details:
        https://github.com/packit/packit-service/pull/674#discussion_r439819852
        """
        with get_sa_session() as session:
            return (
                session.query(
                    # We need something to order our merged builds by,
                    # so set new_id to be min(ids of to-be-merged rows)
                    func.min(CoprBuildTargetModel.id).label("new_id"),
                    # Select identical element(s)
                    CoprBuildTargetModel.build_id,
                    # Merge chroots and statuses from different rows into one
                    func.array_agg(psql_array([CoprBuildTargetModel.target])).label(
                        "target"
                    ),
                    func.array_agg(psql_array([CoprBuildTargetModel.status])).label(
                        "status"
                    ),
                    func.array_agg(psql_array([CoprBuildTargetModel.id])).label(
                        "packit_id_per_chroot"
                    ),
                )
                .group_by(
                    CoprBuildTargetModel.build_id
                )  # Group by identical element(s)
                .order_by(desc("new_id"))
                .slice(first, last)
            )

    # Returns all builds with that build_id, irrespective of target
    @classmethod
    def get_all_by_build_id(
        cls, build_id: Union[str, int]
    ) -> Optional[Iterable["CoprBuildTargetModel"]]:
        if isinstance(build_id, int):
            # See the comment in get_by_build_id()
            build_id = str(build_id)
        with get_sa_session() as session:
            return session.query(CoprBuildTargetModel).filter_by(build_id=build_id)

    @classmethod
    def get_all_by_status(
        cls, status: str
    ) -> Optional[Iterable["CoprBuildTargetModel"]]:
        """Returns all builds which currently have the given status."""
        with get_sa_session() as session:
            return session.query(CoprBuildTargetModel).filter_by(status=status)

    # returns the build matching the build_id and the target
    @classmethod
    def get_by_build_id(
        cls, build_id: Union[str, int], target: Optional[str] = None
    ) -> Optional["CoprBuildTargetModel"]:
        if isinstance(build_id, int):
            # PG is pesky about this:
            # LINE 3: WHERE copr_builds.build_id = 1245767 AND copr_builds.target ...
            # HINT: No operator matches the given name and argument type(s).
            # You might need to add explicit type casts.
            build_id = str(build_id)
        with get_sa_session() as session:
            query = session.query(CoprBuildTargetModel).filter_by(build_id=build_id)
            if target:
                query = query.filter_by(target=target)
            return query.first()

    @staticmethod
    def get_all_by(
        project_name: str,
        commit_sha: str,
        owner: Optional[str] = None,
        target: Optional[str] = None,
    ) -> Optional[Iterable["CoprBuildTargetModel"]]:
        """
        All owner/project_name builds sorted from latest to oldest
        with the given commit_sha and optional target.
        """
        # NOTE(review): relies on locals() capturing exactly the four
        # parameters above (safe only while this stays a @staticmethod and
        # this is the first statement) — confirm before refactoring.
        non_none_args = {
            arg: value for arg, value in locals().items() if value is not None
        }

        with get_sa_session() as session:
            query = (
                session.query(CoprBuildTargetModel)
                .filter_by(**non_none_args)
                .order_by(CoprBuildTargetModel.build_id.desc())
            )
            return query.all()

    @classmethod
    def get_all_by_commit(
        cls, commit_sha: str
    ) -> Optional[Iterable["CoprBuildTargetModel"]]:
        """Returns all builds that match a given commit sha"""
        with get_sa_session() as session:
            query = session.query(CoprBuildTargetModel).filter_by(commit_sha=commit_sha)
            return query.all()

    @classmethod
    def create(
        cls,
        build_id: str,
        commit_sha: str,
        project_name: str,
        owner: str,
        web_url: str,
        target: str,
        status: str,
        run_model: "PipelineModel",
        task_accepted_time: Optional[datetime] = None,
    ) -> "CoprBuildTargetModel":
        with get_sa_session() as session:
            build = cls()
            build.build_id = build_id
            build.status = status
            build.project_name = project_name
            build.owner = owner
            build.commit_sha = commit_sha
            build.web_url = web_url
            build.target = target
            build.task_accepted_time = task_accepted_time
            session.add(build)

            if run_model.copr_build:
                # Clone run model
                new_run_model = PipelineModel.create(
                    type=run_model.job_trigger.type,
                    trigger_id=run_model.job_trigger.trigger_id,
                )
                new_run_model.srpm_build = run_model.srpm_build
                new_run_model.copr_build = build
                session.add(new_run_model)
            else:
                run_model.copr_build = build
                session.add(run_model)

            return build

    @classmethod
    def get(
        cls,
        build_id: str,
        target: str,
    ) -> "CoprBuildTargetModel":
        return cls.get_by_build_id(build_id, target)

    def __repr__(self):
        return f"COPRBuildModel(id={self.id}, build_submitted_time={self.build_submitted_time})"


class KojiBuildTargetModel(ProjectAndTriggersConnector, Base):
    """we create an entry for every target"""

    __tablename__ = "koji_build_targets"
    id = Column(Integer, primary_key=True)
    build_id = Column(String, index=True)  # koji build id

    # commit sha of the PR (or a branch, release) we used for a build
    commit_sha = Column(String)
    # what's the build status?
    status = Column(String)
    # chroot, but we use the word target in our docs
    target = Column(String)
    # URL to koji web ui for the particular build
    web_url = Column(String)
    # url to koji build logs
    build_logs_url = Column(String)
    # datetime.utcnow instead of datetime.utcnow() because its an argument to the function
    # so it will run when the koji build is initiated, not when the table is made
    build_submitted_time = Column(DateTime, default=datetime.utcnow)
    build_start_time = Column(DateTime)
    build_finished_time = Column(DateTime)

    # metadata for the build which didn't make it to schema yet
    # metadata is reserved to sqlalch
    data = Column(JSON)

    # it is a scratch build?
    scratch = Column(Boolean)

    runs = relationship("PipelineModel", back_populates="koji_build")

    # The setters below each open a session so the change is committed
    # immediately by get_sa_session's context-manager exit.
    def set_status(self, status: str):
        with get_sa_session() as session:
            self.status = status
            session.add(self)

    def set_build_logs_url(self, build_logs: str):
        with get_sa_session() as session:
            self.build_logs_url = build_logs
            session.add(self)

    def set_web_url(self, web_url: str):
        with get_sa_session() as session:
            self.web_url = web_url
            session.add(self)

    def set_build_start_time(self, build_start_time: Optional[datetime]):
        with get_sa_session() as session:
            self.build_start_time = build_start_time
            session.add(self)

    def set_build_finished_time(self, build_finished_time: Optional[datetime]):
        with get_sa_session() as session:
            self.build_finished_time = build_finished_time
            session.add(self)

    def set_build_submitted_time(self, build_submitted_time: Optional[datetime]):
        with get_sa_session() as session:
            self.build_submitted_time = build_submitted_time
            session.add(self)

    def set_scratch(self, value: bool):
        with get_sa_session() as session:
            self.scratch = value
            session.add(self)

    def get_srpm_build(self) -> Optional["SRPMBuildModel"]:
        if not self.runs:
            return None
        # All SRPMBuild models for all the runs have to be same.
        return self.runs[0].srpm_build

    @classmethod
    def get_by_id(cls, id_: int) -> Optional["KojiBuildTargetModel"]:
        with get_sa_session() as session:
            return session.query(KojiBuildTargetModel).filter_by(id=id_).first()

    @classmethod
    def get_all(cls) -> Optional[Iterable["KojiBuildTargetModel"]]:
        with get_sa_session() as session:
            return session.query(KojiBuildTargetModel).all()

    @classmethod
    def get_range(cls, first: int, last: int) -> Iterable["KojiBuildTargetModel"]:
        with get_sa_session() as session:
            return (
                session.query(KojiBuildTargetModel)
                .order_by(desc(KojiBuildTargetModel.id))
                .slice(first, last)
            )

    # Returns all builds with that build_id, irrespective of target
    @classmethod
    def get_all_by_build_id(
        cls, build_id: Union[str, int]
    ) -> Optional[Iterable["KojiBuildTargetModel"]]:
        if isinstance(build_id, int):
            # See the comment in get_by_build_id()
            build_id = str(build_id)
        with get_sa_session() as session:
            return session.query(KojiBuildTargetModel).filter_by(build_id=build_id)

    @classmethod
    def get_by_build_id(
        cls, build_id: Union[str, int], target: Optional[str] = None
    ) -> Optional["KojiBuildTargetModel"]:
        """
        Returns the build matching the build_id and the target.
        """
        if isinstance(build_id, int):
            # PG is pesky about this:
            # LINE 3: WHERE koji_builds.build_id = 1245767 AND koji_builds.target ...
            # HINT: No operator matches the given name and argument type(s).
            # You might need to add explicit type casts.
            build_id = str(build_id)
        with get_sa_session() as session:
            if target:
                return (
                    session.query(KojiBuildTargetModel)
                    .filter_by(build_id=build_id, target=target)
                    .first()
                )
            return (
                session.query(KojiBuildTargetModel).filter_by(build_id=build_id).first()
            )

    @classmethod
    def create(
        cls,
        build_id: str,
        commit_sha: str,
        web_url: str,
        target: str,
        status: str,
        scratch: bool,
        run_model: "PipelineModel",
    ) -> "KojiBuildTargetModel":
        with get_sa_session() as session:
            build = cls()
            build.build_id = build_id
            build.status = status
            build.commit_sha = commit_sha
            build.web_url = web_url
            build.target = target
            build.scratch = scratch
            session.add(build)

            if run_model.koji_build:
                # Clone run model
                new_run_model = PipelineModel.create(
                    type=run_model.job_trigger.type,
                    trigger_id=run_model.job_trigger.trigger_id,
                )
                new_run_model.srpm_build = run_model.srpm_build
                new_run_model.koji_build = build
                session.add(new_run_model)
            else:
                run_model.koji_build = build
                session.add(run_model)

            return build

    @classmethod
    def get(
        cls,
        build_id: str,
        target: str,
    ) -> Optional["KojiBuildTargetModel"]:
        return cls.get_by_build_id(build_id, target)

    def __repr__(self):
        return (
            f"KojiBuildTargetModel(id={self.id}, "
            f"build_submitted_time={self.build_submitted_time})"
        )


class SRPMBuildModel(ProjectAndTriggersConnector, Base):
    __tablename__ = "srpm_builds"
    id = Column(Integer, primary_key=True)
    status = Column(String)
    # our logs we want to show to the user
    logs = Column(Text)
    build_submitted_time = Column(DateTime, default=datetime.utcnow)
    build_start_time = Column(DateTime)
    build_finished_time = Column(DateTime)
    commit_sha = Column(String)
    # url for downloading the SRPM
    url = Column(Text)
    # attributes for SRPM built by Copr
    logs_url = Column(Text)
    copr_build_id = Column(String, index=True)
    copr_web_url = Column(Text)

    runs = relationship("PipelineModel", back_populates="srpm_build")

    @classmethod
    def create_with_new_run(
        cls,
        trigger_model: AbstractTriggerDbType,
        commit_sha: str,
        copr_build_id: Optional[str] =
None, copr_web_url: Optional[str] = None, ) -> Tuple["SRPMBuildModel", "PipelineModel"]: """ Create a new model for SRPM and connect it to the PipelineModel. * New SRPMBuildModel model will have connection to a new PipelineModel. * The newly created PipelineModel can reuse existing JobTriggerModel (e.g.: one pull-request can have multiple runs). More specifically: * On PR creation: -> SRPMBuildModel is created. -> New PipelineModel is created. -> JobTriggerModel is created. * On `/packit build` comment or new push: -> SRPMBuildModel is created. -> New PipelineModel is created. -> JobTriggerModel is reused. * On `/packit test` comment: -> SRPMBuildModel and CoprBuildTargetModel are reused. -> New TFTTestRunTargetModel is created. -> New PipelineModel is created and collects this new TFTTestRunTargetModel with old SRPMBuildModel and CoprBuildTargetModel. """ with get_sa_session() as session: srpm_build = cls() srpm_build.status = "pending" srpm_build.commit_sha = commit_sha srpm_build.copr_build_id = copr_build_id srpm_build.copr_web_url = copr_web_url session.add(srpm_build) # Create a new run model, reuse trigger_model if it exists: new_run_model = PipelineModel.create( type=trigger_model.job_trigger_model_type, trigger_id=trigger_model.id ) new_run_model.srpm_build = srpm_build session.add(new_run_model) return srpm_build, new_run_model @classmethod def get_by_id( cls, id_: int, ) -> Optional["SRPMBuildModel"]: with get_sa_session() as session: return session.query(SRPMBuildModel).filter_by(id=id_).first() @classmethod def get(cls, first: int, last: int) -> Iterable["SRPMBuildModel"]: with get_sa_session() as session: return ( session.query(SRPMBuildModel) .order_by(desc(SRPMBuildModel.id)) .slice(first, last) ) @classmethod def get_by_copr_build_id( cls, copr_build_id: Union[str, int] ) -> Optional["SRPMBuildModel"]: if isinstance(copr_build_id, int): copr_build_id = str(copr_build_id) with get_sa_session() as session: return ( session.query(SRPMBuildModel) 
.filter_by(copr_build_id=copr_build_id) .first() ) @classmethod def get_older_than(cls, delta: timedelta) -> Iterable["SRPMBuildModel"]: """Return builds older than delta, whose logs/artifacts haven't been discarded yet.""" delta_ago = datetime.utcnow() - delta with get_sa_session() as session: return session.query(SRPMBuildModel).filter( SRPMBuildModel.build_submitted_time < delta_ago, SRPMBuildModel.logs.isnot(None), ) def set_url(self, url: Optional[str]) -> None: with get_sa_session() as session: self.url = null() if url is None else url session.add(self) def set_logs(self, logs: Optional[str]) -> None: with get_sa_session() as session: self.logs = null() if logs is None else logs session.add(self) def set_start_time(self, start_time: datetime) -> None: with get_sa_session() as session: self.build_start_time = start_time session.add(self) def set_end_time(self, end_time: datetime) -> None: with get_sa_session() as session: self.build_finished_time = end_time session.add(self) def set_build_logs_url(self, logs_url: str) -> None: with get_sa_session() as session: self.logs_url = logs_url session.add(self) def set_status(self, status: str) -> None: with get_sa_session() as session: self.status = status session.add(self) def __repr__(self): return f"SRPMBuildModel(id={self.id}, build_submitted_time={self.build_submitted_time})" class AllowlistStatus(str, enum.Enum): approved_automatically = ALLOWLIST_CONSTANTS["approved_automatically"] waiting = ALLOWLIST_CONSTANTS["waiting"] approved_manually = ALLOWLIST_CONSTANTS["approved_manually"] denied = ALLOWLIST_CONSTANTS["denied"] class AllowlistModel(Base): __tablename__ = "allowlist" id = Column(Integer, primary_key=True) namespace = Column(String, index=True) # renamed from account_name status = Column(Enum(AllowlistStatus)) fas_account = Column(String) @classmethod def add_namespace( cls, namespace: str, status: str, fas_account: Optional[str] = None ): """ Adds namespace with specific status to the allowlist. 
If namespace is present, just changes the status. Args: namespace (str): Namespace to be added. Can be `github.com/namespace` or specific repository `github.com/namespace/repository.git`. status (str): Status to be set. AllowlistStatus enumeration as string. fas_account (Optional[str]): FAS login, in case the namespace was automatically approved through the FAS login of user that installed GitHub App. Defaults to `None`. Returns: Newly created entry or entry that represents requested namespace. """ with get_sa_session() as session: namespace_entry = cls.get_namespace(namespace) if not namespace_entry: namespace_entry = cls() namespace_entry.namespace = namespace namespace_entry.status = status if fas_account: namespace_entry.fas_account = fas_account session.add(namespace_entry) return namespace_entry @classmethod def get_namespace(cls, namespace: str) -> Optional["AllowlistModel"]: """ Retrieves namespace from the allowlist. Args: namespace (str): Namespace to be added. Can be `github.com/namespace` or specific repository `github.com/namespace/repository.git`. Returns: Entry that represents namespace or `None` if cannot be found. """ with get_sa_session() as session: return session.query(AllowlistModel).filter_by(namespace=namespace).first() @classmethod def get_namespaces_by_status( cls, status: str ) -> Optional[Iterable["AllowlistModel"]]: """ Get list of namespaces with specific status. Args: status (str): Status of the namespaces. AllowlistStatus enumeration as string. Returns: List of the namespaces with set status. 
""" with get_sa_session() as session: return session.query(AllowlistModel).filter_by(status=status) @classmethod def remove_namespace(cls, namespace: str) -> Optional["AllowlistModel"]: with get_sa_session() as session: namespace_entry = session.query(AllowlistModel).filter_by( namespace=namespace ) if namespace_entry: namespace_entry.delete() return namespace_entry @classmethod def get_all(cls) -> Optional[Iterable["AllowlistModel"]]: with get_sa_session() as session: return session.query(AllowlistModel).all() def to_dict(self) -> Dict[str, str]: return { "namespace": self.namespace, "status": self.status, "fas_account": self.fas_account, } def __repr__(self): return ( f'<AllowlistModel(namespace="{self.namespace}", ' f'status="{self.status}", ' f'fas_account="{self.fas_account}")>' ) class TestingFarmResult(str, enum.Enum): new = "new" queued = "queued" running = "running" passed = "passed" failed = "failed" skipped = "skipped" error = "error" unknown = "unknown" needs_inspection = "needs_inspection" class TFTTestRunTargetModel(ProjectAndTriggersConnector, Base): __tablename__ = "tft_test_run_targets" id = Column(Integer, primary_key=True) pipeline_id = Column(String, index=True) commit_sha = Column(String) status = Column(Enum(TestingFarmResult)) target = Column(String) web_url = Column(String) # datetime.utcnow instead of datetime.utcnow() because its an argument to the function # so it will run when the model is initiated, not when the table is made submitted_time = Column(DateTime, default=datetime.utcnow) data = Column(JSON) runs = relationship("PipelineModel", back_populates="test_run") def set_status(self, status: TestingFarmResult, created: Optional[DateTime] = None): """ set status of the TF run and optionally set the created datetime as well """ with get_sa_session() as session: self.status = status if created and not self.submitted_time: self.submitted_time = created session.add(self) def set_web_url(self, web_url: str): with get_sa_session() as 
session: self.web_url = web_url session.add(self) @classmethod def create( cls, pipeline_id: str, commit_sha: str, status: TestingFarmResult, target: str, run_model: "PipelineModel", web_url: Optional[str] = None, data: dict = None, ) -> "TFTTestRunTargetModel": with get_sa_session() as session: test_run = cls() test_run.pipeline_id = pipeline_id test_run.commit_sha = commit_sha test_run.status = status test_run.target = target test_run.web_url = web_url test_run.data = data session.add(test_run) if run_model.test_run: # Clone run model new_run_model = PipelineModel.create( type=run_model.job_trigger.type, trigger_id=run_model.job_trigger.trigger_id, ) new_run_model.srpm_build = run_model.srpm_build new_run_model.copr_build = run_model.copr_build new_run_model.test_run = test_run session.add(new_run_model) else: run_model.test_run = test_run session.add(run_model) return test_run @classmethod def get_by_pipeline_id(cls, pipeline_id: str) -> Optional["TFTTestRunTargetModel"]: with get_sa_session() as session: return ( session.query(TFTTestRunTargetModel) .filter_by(pipeline_id=pipeline_id) .first() ) @classmethod def get_all_by_status( cls, *status: TestingFarmResult ) -> Optional[Iterable["TFTTestRunTargetModel"]]: """Returns all runs which currently have their status set to one of the requested statuses.""" with get_sa_session() as session: return session.query(TFTTestRunTargetModel).filter( TFTTestRunTargetModel.status.in_(status) ) @classmethod def get_by_id(cls, id: int) -> Optional["TFTTestRunTargetModel"]: with get_sa_session() as session: return session.query(TFTTestRunTargetModel).filter_by(id=id).first() @staticmethod def get_all_by_commit_target( commit_sha: str, target: str = None, ) -> Optional[Iterable["TFTTestRunTargetModel"]]: """ All tests with the given commit_sha and optional target. 
""" non_none_args = { arg: value for arg, value in locals().items() if value is not None } with get_sa_session() as session: query = session.query(TFTTestRunTargetModel).filter_by(**non_none_args) return query.all() @classmethod def get_range( cls, first: int, last: int ) -> Optional[Iterable["TFTTestRunTargetModel"]]: with get_sa_session() as session: return ( session.query(TFTTestRunTargetModel) .order_by(desc(TFTTestRunTargetModel.id)) .slice(first, last) ) def __repr__(self): return f"TFTTestRunTargetModel(id={self.id}, pipeline_id={self.pipeline_id})" class ProposeDownstreamTargetStatus(str, enum.Enum): queued = "queued" running = "running" error = "error" retry = "retry" submitted = "submitted" class ProposeDownstreamTargetModel(ProjectAndTriggersConnector, Base): __tablename__ = "propose_downstream_run_targets" id = Column(Integer, primary_key=True) branch = Column(String, default="unknown") downstream_pr_url = Column(String) status = Column(Enum(ProposeDownstreamTargetStatus)) submitted_time = Column(DateTime, default=datetime.utcnow) start_time = Column(DateTime) finished_time = Column(DateTime) logs = Column(Text) propose_downstream_id = Column(Integer, ForeignKey("propose_downstream_runs.id")) propose_downstream = relationship( "ProposeDownstreamModel", back_populates="propose_downstream_targets" ) def __repr__(self) -> str: return f"ProposeDownstreamTargetModel(id={self.id})" @classmethod def create( cls, status: ProposeDownstreamTargetStatus, ) -> "ProposeDownstreamTargetModel": with get_sa_session() as session: downstream_pr = cls() downstream_pr.status = status session.add(downstream_pr) return downstream_pr def set_status(self, status: ProposeDownstreamTargetStatus) -> None: with get_sa_session() as session: self.status = status session.add(self) def set_downstream_pr_url(self, downstream_pr_url: str) -> None: with get_sa_session() as session: self.downstream_pr_url = downstream_pr_url session.add(self) def set_start_time(self, start_time: DateTime) 
-> None: with get_sa_session() as session: self.start_time = start_time session.add(self) def set_finished_time(self, finished_time: DateTime) -> None: with get_sa_session() as session: self.finished_time = finished_time session.add(self) def set_branch(self, branch: str) -> None: with get_sa_session() as session: self.branch = branch session.add(self) def set_logs(self, logs: str) -> None: with get_sa_session() as session: self.logs = logs session.add(self) @classmethod def get_by_id(cls, id_: int) -> Optional["ProposeDownstreamTargetModel"]: with get_sa_session() as session: return session.query(ProposeDownstreamTargetModel).filter_by(id=id_).first() class ProposeDownstreamStatus(str, enum.Enum): running = "running" finished = "finished" error = "error" class ProposeDownstreamModel(ProjectAndTriggersConnector, Base): __tablename__ = "propose_downstream_runs" id = Column(Integer, primary_key=True) status = Column(Enum(ProposeDownstreamStatus)) submitted_time = Column(DateTime, default=datetime.utcnow) runs = relationship("PipelineModel", back_populates="propose_downstream_run") propose_downstream_targets = relationship( "ProposeDownstreamTargetModel", back_populates="propose_downstream" ) def __repr__(self) -> str: return f"ProposeDownstreamModel(id={self.id}, submitted_time={self.submitted_time})" @classmethod def create_with_new_run( cls, status: ProposeDownstreamStatus, trigger_model: AbstractTriggerDbType, ) -> Tuple["ProposeDownstreamModel", "PipelineModel"]: """ Create a new model for ProposeDownstream and connect it to the PipelineModel. * New ProposeDownstreamModel model will have connection to a new PipelineModel. * The newly created PipelineModel can reuse existing JobTriggerModel (e.g.: one IssueModel can have multiple runs). More specifically: * On `/packit propose-downstream` issue comment: -> ProposeDownstreamModel is created. -> New PipelineModel is created. -> JobTriggerModel is created. 
* Something went wrong, after correction and another `/packit propose-downstream` comment: -> ProposeDownstreamModel is created. -> PipelineModel is created. -> JobTriggerModel is reused. * TODO: we will use propose-downstream in commit-checks - fill in once it's implemented """ with get_sa_session() as session: propose_downstream = cls() propose_downstream.status = status session.add(propose_downstream) # Create a pipeline, reuse trigger_model if it exists: pipeline = PipelineModel.create( type=trigger_model.job_trigger_model_type, trigger_id=trigger_model.id ) pipeline.propose_downstream_run = propose_downstream session.add(pipeline) return propose_downstream, pipeline def set_status(self, status: ProposeDownstreamStatus) -> None: with get_sa_session() as session: self.status = status session.add(self) @classmethod def get_by_id(cls, id_: int) -> Optional["ProposeDownstreamModel"]: with get_sa_session() as session: return session.query(ProposeDownstreamModel).filter_by(id=id_).first() @classmethod def get_all_by_status( cls, status: str ) -> Optional[Iterable["ProposeDownstreamModel"]]: with get_sa_session() as session: return session.query(ProposeDownstreamModel).filter_by(status=status) @classmethod def get_range(cls, first: int, last: int) -> Iterable["ProposeDownstreamModel"]: with get_sa_session() as session: return ( session.query(ProposeDownstreamModel) .order_by(desc(ProposeDownstreamModel.id)) .slice(first, last) ) AbstractBuildTestDbType = Union[ CoprBuildTargetModel, KojiBuildTargetModel, SRPMBuildModel, TFTTestRunTargetModel, ProposeDownstreamModel, ] class ProjectAuthenticationIssueModel(Base): __tablename__ = "project_authentication_issue" id = Column(Integer, primary_key=True) project = relationship( "GitProjectModel", back_populates="project_authentication_issue" ) # Check to know if we created a issue for the repo. 
issue_created = Column(Boolean) project_id = Column(Integer, ForeignKey("git_projects.id")) @classmethod def get_project( cls, namespace: str, repo_name: str, project_url: str ) -> Optional["ProjectAuthenticationIssueModel"]: with get_sa_session() as session: project = GitProjectModel.get_or_create( namespace=namespace, repo_name=repo_name, project_url=project_url ) return ( session.query(ProjectAuthenticationIssueModel) .filter_by(project_id=project.id) .first() ) @classmethod def create( cls, namespace: str, repo_name: str, project_url: str, issue_created: bool ) -> "ProjectAuthenticationIssueModel": with get_sa_session() as session: project = GitProjectModel.get_or_create( namespace=namespace, repo_name=repo_name, project_url=project_url ) project_authentication_issue = cls() project_authentication_issue.issue_created = issue_created project_authentication_issue.project_id = project.id session.add(project_authentication_issue) return project_authentication_issue def __repr__(self): return ( f"ProjectAuthenticationIssueModel(project={self.project}, " f"issue_created={self.issue_created})" ) class GithubInstallationModel(Base): __tablename__ = "github_installations" id = Column(Integer, primary_key=True, autoincrement=True) # information about account (user/organization) into which the app has been installed account_login = Column(String) account_id = Column(Integer) account_url = Column(String) account_type = Column(String) # information about user who installed the app into 'account' sender_id = Column(Integer) sender_login = Column(String) created_at = Column(DateTime, default=datetime.utcnow) repositories = Column(ARRAY(Integer, ForeignKey("git_projects.id"))) @classmethod def get_project(cls, repository: str): namespace, repo_name = repository.split("/") return GitProjectModel.get_or_create( namespace=namespace, repo_name=repo_name, project_url=f"https://github.com/{namespace}/{repo_name}", ) @classmethod def get_by_id(cls, id: int) -> 
Optional["GithubInstallationModel"]: with get_sa_session() as session: return session.query(GithubInstallationModel).filter_by(id=id).first() @classmethod def get_by_account_login( cls, account_login: str ) -> Optional["GithubInstallationModel"]: with get_sa_session() as session: return ( session.query(GithubInstallationModel) .filter_by(account_login=account_login) .first() ) @classmethod def get_all(cls) -> Optional[Iterable["GithubInstallationModel"]]: with get_sa_session() as session: return session.query(GithubInstallationModel).all() @classmethod def create(cls, event): with get_sa_session() as session: installation = cls.get_by_account_login(event.account_login) if not installation: installation = cls() installation.account_login = event.account_login installation.account_id = event.account_id installation.account_url = event.account_url installation.account_type = event.account_type installation.sender_login = event.sender_login installation.sender_id = event.sender_id installation.created_at = event.created_at installation.repositories = [ cls.get_project(repo).id for repo in event.repositories ] session.add(installation) return installation def to_dict(self): return { "account_login": self.account_login, "account_id": self.account_id, "account_type": self.account_type, "account_url": self.account_url, "sender_login": self.sender_login, "sender_id": self.sender_id, # Inconsistent with other API endpoints, kept for readability for # internal use, if necessary "created_at": optional_time(self.created_at), } def __repr__(self): return f"GithubInstallationModel(id={self.id}, account={self.account_login})" class SourceGitPRDistGitPRModel(Base): __tablename__ = "source_git_pr_dist_git_pr" id = Column(Integer, primary_key=True) # our database PK source_git_pull_request_id = Column( Integer, ForeignKey("pull_requests.id"), unique=True, index=True ) dist_git_pull_request_id = Column( Integer, ForeignKey("pull_requests.id"), unique=True, index=True ) 
source_git_pull_request = relationship( "PullRequestModel", primaryjoin="SourceGitPRDistGitPRModel.source_git_pull_request_id==PullRequestModel.id", uselist=False, ) dist_git_pull_request = relationship( "PullRequestModel", primaryjoin="SourceGitPRDistGitPRModel.dist_git_pull_request_id==PullRequestModel.id", uselist=False, ) @classmethod def get_or_create( cls, source_git_pr_id: int, source_git_namespace: str, source_git_repo_name: str, source_git_project_url: str, dist_git_pr_id: int, dist_git_namespace: str, dist_git_repo_name: str, dist_git_project_url: str, ) -> "SourceGitPRDistGitPRModel": with get_sa_session() as session: source_git_pull_request = PullRequestModel.get_or_create( pr_id=source_git_pr_id, namespace=source_git_namespace, repo_name=source_git_repo_name, project_url=source_git_project_url, ) dist_git_pull_request = PullRequestModel.get_or_create( pr_id=dist_git_pr_id, namespace=dist_git_namespace, repo_name=dist_git_repo_name, project_url=dist_git_project_url, ) rel = ( session.query(SourceGitPRDistGitPRModel) .filter_by(source_git_pull_request_id=source_git_pull_request.id) .filter_by(dist_git_pull_request_id=dist_git_pull_request.id) .one_or_none() ) if not rel: rel = SourceGitPRDistGitPRModel() rel.source_git_pull_request_id = source_git_pull_request.id rel.dist_git_pull_request_id = dist_git_pull_request.id session.add(rel) return rel @classmethod def get_by_id(cls, id_: int) -> Optional["SourceGitPRDistGitPRModel"]: with get_sa_session() as session: return ( session.query(SourceGitPRDistGitPRModel).filter_by(id=id_).one_or_none() ) @classmethod def get_by_source_git_id(cls, id_: int) -> Optional["SourceGitPRDistGitPRModel"]: with get_sa_session() as session: return ( session.query(SourceGitPRDistGitPRModel) .filter_by(source_git_pull_request_id=id_) .one_or_none() ) @classmethod def get_by_dist_git_id(cls, id_: int) -> Optional["SourceGitPRDistGitPRModel"]: with get_sa_session() as session: return ( session.query(SourceGitPRDistGitPRModel) 
.filter_by(dist_git_pull_request_id=id_) .one_or_none() )
# Copyright Contributors to the Packit project. # SPDX-License-Identifier: MIT """ Data layer on top of PSQL using sqlalch """ import enum import logging import os from contextlib import contextmanager from datetime import datetime, timedelta from typing import ( Dict, Iterable, List, Optional, TYPE_CHECKING, Tuple, Type, Union, ) from urllib.parse import urlparse from sqlalchemy import ( Boolean, Column, DateTime, Enum, ForeignKey, Integer, JSON, String, Text, create_engine, desc, func, null, case, ) from sqlalchemy.dialects.postgresql import array as psql_array from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import Session, relationship, scoped_session, sessionmaker from sqlalchemy.types import ARRAY from packit.config import JobConfigTriggerType from packit.exceptions import PackitException from packit_service.constants import ALLOWLIST_CONSTANTS logger = logging.getLogger(__name__) # SQLAlchemy session, get it with `get_sa_session` session_instance = None def get_pg_url() -> str: """create postgresql connection string""" return ( f"postgresql+psycopg2://{os.getenv('POSTGRESQL_USER')}" f":{os.getenv('POSTGRESQL_PASSWORD')}@{os.getenv('POSTGRESQL_HOST', 'postgres')}" f":{os.getenv('POSTGRESQL_PORT', '5432')}/{os.getenv('POSTGRESQL_DATABASE')}" ) engine = create_engine(get_pg_url()) ScopedSession = scoped_session(sessionmaker(bind=engine)) @contextmanager def get_sa_session() -> Session: """get SQLAlchemy session""" session = ScopedSession() try: yield session session.commit() except Exception as ex: logger.warning(f"Exception while working with database: {ex!r}") session.rollback() raise def optional_time( datetime_object: Union[datetime, None], fmt: str = "%d/%m/%Y %H:%M:%S" ) -> Union[str, None]: """ Returns a formatted date-time string if argument is a datetime object. Args: datetime_object: date-time to be converted to string fmt: format string to be used to produce the string. Defaults to `"%d/%m/%Y %H:%M:%S"`. 
Returns: Formatted date-time or `None` if no datetime is provided. """ if datetime_object is None: return None return datetime_object.strftime(fmt) def optional_timestamp(datetime_object: Optional[datetime]) -> Optional[int]: """ Returns a UNIX timestamp if argument is a datetime object. Args: datetime_object: Date-time to be converted to timestamp. Returns: UNIX timestamp or `None` if no datetime object is provided. """ if datetime_object is None: return None return int(datetime_object.timestamp()) # https://github.com/python/mypy/issues/2477#issuecomment-313984522 ^_^ if TYPE_CHECKING: Base = object else: Base = declarative_base() class JobTriggerModelType(str, enum.Enum): pull_request = "pull_request" branch_push = "branch_push" release = "release" issue = "issue" class BuildsAndTestsConnector: """ Abstract class that is inherited by trigger models to share methods for accessing build/test models.. """ id: int job_trigger_model_type: JobTriggerModelType def get_runs(self) -> List["PipelineModel"]: with get_sa_session() as session: trigger_list = ( session.query(JobTriggerModel) .filter_by(type=self.job_trigger_model_type, trigger_id=self.id) .all() ) if len(trigger_list) > 1: msg = ( f"There are multiple run models for type {self.job_trigger_model_type}" f"and id={self.id}." 
) logger.error(msg) raise PackitException(msg) return trigger_list[0].runs if trigger_list else [] def _get_run_item( self, model_type: Type["AbstractBuildTestDbType"] ) -> List["AbstractBuildTestDbType"]: runs = self.get_runs() models = [] if model_type == CoprBuildTargetModel: models = [run.copr_build for run in runs] if model_type == KojiBuildTargetModel: models = [run.koji_build for run in runs] if model_type == SRPMBuildModel: models = [run.srpm_build for run in runs] if model_type == TFTTestRunTargetModel: models = [run.test_run for run in runs] return list({model for model in models if model is not None}) def get_copr_builds(self): return self._get_run_item(model_type=CoprBuildTargetModel) def get_koji_builds(self): return self._get_run_item(model_type=KojiBuildTargetModel) def get_srpm_builds(self): return self._get_run_item(model_type=SRPMBuildModel) def get_test_runs(self): return self._get_run_item(model_type=TFTTestRunTargetModel) class ProjectAndTriggersConnector: """ Abstract class that is inherited by build/test models to share methods for accessing project and trigger models. 
""" runs: Optional[List["PipelineModel"]] def get_job_trigger_model(self) -> Optional["JobTriggerModel"]: if not self.runs: return None return self.runs[0].job_trigger def get_trigger_object(self) -> Optional["AbstractTriggerDbType"]: job_trigger = self.get_job_trigger_model() if not job_trigger: return None return job_trigger.get_trigger_object() def get_project(self) -> Optional["GitProjectModel"]: trigger_object = self.get_trigger_object() if not trigger_object: return None return trigger_object.project def get_pr_id(self) -> Optional[int]: trigger_object = self.get_trigger_object() if isinstance(trigger_object, PullRequestModel): return trigger_object.pr_id return None def get_issue_id(self) -> Optional[int]: trigger_object = self.get_trigger_object() if not isinstance(trigger_object, IssueModel): return None return trigger_object.issue_id def get_branch_name(self) -> Optional[str]: trigger_object = self.get_trigger_object() if isinstance(trigger_object, GitBranchModel): return trigger_object.name return None def get_release_tag(self) -> Optional[str]: trigger_object = self.get_trigger_object() if isinstance(trigger_object, ProjectReleaseModel): return trigger_object.tag_name return None class GitProjectModel(Base): __tablename__ = "git_projects" id = Column(Integer, primary_key=True) # github.com/NAMESPACE/REPO_NAME # git.centos.org/NAMESPACE/REPO_NAME namespace = Column(String, index=True) repo_name = Column(String, index=True) pull_requests = relationship("PullRequestModel", back_populates="project") branches = relationship("GitBranchModel", back_populates="project") releases = relationship("ProjectReleaseModel", back_populates="project") issues = relationship("IssueModel", back_populates="project") project_authentication_issue = relationship( "ProjectAuthenticationIssueModel", back_populates="project" ) # Git URL of the repo # Example: https://github.com/packit/hello-world.git https_url = Column(String) project_url = Column(String) instance_url = 
Column(String, nullable=False)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Cache the forge hostname (e.g. "github.com") parsed from the project URL.
        self.instance_url = urlparse(self.project_url).hostname

    @classmethod
    def get_or_create(
        cls, namespace: str, repo_name: str, project_url: str
    ) -> "GitProjectModel":
        """Return the existing project row or create and add a new one."""
        with get_sa_session() as session:
            project = (
                session.query(GitProjectModel)
                .filter_by(
                    namespace=namespace, repo_name=repo_name, project_url=project_url
                )
                .first()
            )
            if not project:
                project = cls(
                    repo_name=repo_name, namespace=namespace, project_url=project_url
                )
                session.add(project)
            return project

    @classmethod
    def get_projects(cls, first: int, last: int) -> Iterable["GitProjectModel"]:
        """Return projects ordered by namespace, sliced to [first, last)."""
        with get_sa_session() as session:
            return (
                session.query(GitProjectModel)
                .order_by(GitProjectModel.namespace)
                .slice(first, last)
            )

    @classmethod
    def get_forge(
        cls, first: int, last: int, forge: str
    ) -> Iterable["GitProjectModel"]:
        """Return projects of given forge"""
        with get_sa_session() as session:
            return (
                session.query(GitProjectModel)
                .filter_by(instance_url=forge)
                .order_by(GitProjectModel.namespace)
                .slice(first, last)
            )

    @classmethod
    def get_namespace(cls, forge: str, namespace: str) -> Iterable["GitProjectModel"]:
        """Return projects of given forge and namespace"""
        with get_sa_session() as session:
            projects = (
                session.query(GitProjectModel).filter_by(namespace=namespace).all()
            )
            # The forge filter happens in Python: compare the hostname parsed
            # from each project's URL against the requested forge.
            matched_projects = []
            for project in projects:
                forge_domain = urlparse(project.project_url).hostname
                if forge == forge_domain:
                    matched_projects.append(project)
            return matched_projects

    @classmethod
    def get_project(
        cls, forge: str, namespace: str, repo_name: str
    ) -> Optional["GitProjectModel"]:
        """Return one project which matches said criteria"""
        with get_sa_session() as session:
            project = (
                session.query(cls)
                .filter_by(instance_url=forge, namespace=namespace, repo_name=repo_name)
                .one_or_none()
            )
            return project

    @classmethod
    def get_project_prs(
        cls, first: int, last: int, forge: str, namespace: str, repo_name: str
    ) -> Iterable["PullRequestModel"]:
        """Return the project's pull requests, newest PR id first, sliced to [first, last)."""
        with get_sa_session() as session:
            return (
                session.query(PullRequestModel)
                .join(GitProjectModel)
                .filter(
                    PullRequestModel.project_id == GitProjectModel.id,
                    GitProjectModel.instance_url == forge,
                    GitProjectModel.namespace == namespace,
                    GitProjectModel.repo_name == repo_name,
                )
                .order_by(desc(PullRequestModel.pr_id))
                .slice(first, last)
            )

    @classmethod
    def get_project_issues(
        cls, forge: str, namespace: str, repo_name: str
    ) -> Optional[Iterable["IssueModel"]]:
        """Return all issues of the given project."""
        with get_sa_session() as session:
            issues = (
                session.query(IssueModel)
                .join(GitProjectModel)
                .filter(
                    IssueModel.project_id == GitProjectModel.id,
                    GitProjectModel.instance_url == forge,
                    GitProjectModel.namespace == namespace,
                    GitProjectModel.repo_name == repo_name,
                )
                .all()
            )
            return issues

    @classmethod
    def get_project_branches(
        cls, forge: str, namespace: str, repo_name: str
    ) -> Optional[Iterable["GitBranchModel"]]:
        """Return all branches of the given project."""
        with get_sa_session() as session:
            branches = (
                session.query(GitBranchModel)
                .join(GitProjectModel)
                .filter(
                    GitBranchModel.project_id == GitProjectModel.id,
                    GitProjectModel.instance_url == forge,
                    GitProjectModel.namespace == namespace,
                    GitProjectModel.repo_name == repo_name,
                )
                .all()
            )
            return branches

    @classmethod
    def get_project_releases(
        cls, forge: str, namespace: str, repo_name: str
    ) -> Optional[Iterable["ProjectReleaseModel"]]:
        """Return all releases of the given project."""
        with get_sa_session() as session:
            releases = (
                session.query(ProjectReleaseModel)
                .join(GitProjectModel)
                .filter(
                    ProjectReleaseModel.project_id == GitProjectModel.id,
                    GitProjectModel.instance_url == forge,
                    GitProjectModel.namespace == namespace,
                    GitProjectModel.repo_name == repo_name,
                )
                .all()
            )
            return releases

    def __repr__(self):
        return (
            f"GitProjectModel(name={self.namespace}/{self.repo_name}, "
            f"project_url='{self.project_url}')"
        )


class PullRequestModel(BuildsAndTestsConnector, Base):
    """A pull request of a tracked git project."""

    __tablename__ = "pull_requests"
    id = Column(Integer, primary_key=True)  # our database PK
    # GitHub PR ID
    # this is not our PK b/c:
    # 1) we don't control it
    # 2) we want sensible auto-incremented ID, not random numbers
    # 3) it's not unique across projects obviously, so why am I even writing this?
    pr_id = Column(Integer, index=True)
    project_id = Column(Integer, ForeignKey("git_projects.id"))
    project = relationship("GitProjectModel", back_populates="pull_requests")
    # CentOS Pagure only
    bugzilla = relationship("BugzillaModel", back_populates="pull_request")

    job_config_trigger_type = JobConfigTriggerType.pull_request
    job_trigger_model_type = JobTriggerModelType.pull_request

    @classmethod
    def get_or_create(
        cls, pr_id: int, namespace: str, repo_name: str, project_url: str
    ) -> "PullRequestModel":
        """Return the existing PR row or create it (creating the project row if needed)."""
        with get_sa_session() as session:
            project = GitProjectModel.get_or_create(
                namespace=namespace, repo_name=repo_name, project_url=project_url
            )
            pr = (
                session.query(PullRequestModel)
                .filter_by(pr_id=pr_id, project_id=project.id)
                .first()
            )
            if not pr:
                pr = PullRequestModel()
                pr.pr_id = pr_id
                pr.project_id = project.id
                session.add(pr)
            return pr

    @classmethod
    def get_by_id(cls, id_: int) -> Optional["PullRequestModel"]:
        with get_sa_session() as session:
            return session.query(PullRequestModel).filter_by(id=id_).first()

    def __repr__(self):
        return f"PullRequestModel(pr_id={self.pr_id}, project={self.project})"


class IssueModel(BuildsAndTestsConnector, Base):
    """An issue of a tracked git project."""

    __tablename__ = "project_issues"
    id = Column(Integer, primary_key=True)  # our database PK
    issue_id = Column(Integer, index=True)
    project_id = Column(Integer, ForeignKey("git_projects.id"))
    project = relationship("GitProjectModel", back_populates="issues")
    # TODO: Fix this hardcoding! This is only to make propose-downstream work!
    job_config_trigger_type = JobConfigTriggerType.release
    job_trigger_model_type = JobTriggerModelType.issue

    @classmethod
    def get_or_create(
        cls, issue_id: int, namespace: str, repo_name: str, project_url: str
    ) -> "IssueModel":
        """Return the existing issue row or create it (creating the project row if needed)."""
        with get_sa_session() as session:
            project = GitProjectModel.get_or_create(
                namespace=namespace, repo_name=repo_name, project_url=project_url
            )
            issue = (
                session.query(IssueModel)
                .filter_by(issue_id=issue_id, project_id=project.id)
                .first()
            )
            if not issue:
                issue = IssueModel()
                issue.issue_id = issue_id
                issue.project_id = project.id
                session.add(issue)
            return issue

    @classmethod
    def get_by_id(cls, id_: int) -> Optional["IssueModel"]:
        with get_sa_session() as session:
            return session.query(IssueModel).filter_by(id=id_).first()

    def __repr__(self):
        return f"IssueModel(id={self.issue_id}, project={self.project})"


class GitBranchModel(BuildsAndTestsConnector, Base):
    """A branch of a tracked git project (push trigger)."""

    __tablename__ = "git_branches"
    id = Column(Integer, primary_key=True)  # our database PK
    name = Column(String)
    project_id = Column(Integer, ForeignKey("git_projects.id"))
    project = relationship("GitProjectModel", back_populates="branches")

    job_config_trigger_type = JobConfigTriggerType.commit
    job_trigger_model_type = JobTriggerModelType.branch_push

    @classmethod
    def get_or_create(
        cls, branch_name: str, namespace: str, repo_name: str, project_url: str
    ) -> "GitBranchModel":
        """Return the existing branch row or create it (creating the project row if needed)."""
        with get_sa_session() as session:
            project = GitProjectModel.get_or_create(
                namespace=namespace, repo_name=repo_name, project_url=project_url
            )
            git_branch = (
                session.query(GitBranchModel)
                .filter_by(name=branch_name, project_id=project.id)
                .first()
            )
            if not git_branch:
                git_branch = GitBranchModel()
                git_branch.name = branch_name
                git_branch.project_id = project.id
                session.add(git_branch)
            return git_branch

    @classmethod
    def get_by_id(cls, id_: int) -> Optional["GitBranchModel"]:
        with get_sa_session() as session:
            return session.query(GitBranchModel).filter_by(id=id_).first()

    def __repr__(self):
        return f"GitBranchModel(name={self.name}, project={self.project})"


class BugzillaModel(Base):
    """A bugzilla bug attached to a pull request (CentOS Pagure only)."""

    __tablename__ = "bugzillas"
    id = Column(Integer, primary_key=True)
    bug_id = Column(Integer, index=True)
    bug_url = Column(String)
    pull_request_id = Column(Integer, ForeignKey("pull_requests.id"))
    pull_request = relationship("PullRequestModel", back_populates="bugzilla")

    @classmethod
    def get_or_create(
        cls,
        pr_id: int,
        namespace: str,
        repo_name: str,
        project_url: str,
        bug_id: int = None,
        bug_url: str = None,
    ) -> "BugzillaModel":
        """Return the bugzilla row attached to the PR.

        A new row is created only when both bug_id and bug_url are given;
        otherwise the return value may be None.
        """
        with get_sa_session() as session:
            pull_request = PullRequestModel.get_or_create(
                pr_id=pr_id,
                namespace=namespace,
                repo_name=repo_name,
                project_url=project_url,
            )
            bugzilla = (
                session.query(BugzillaModel)
                .filter_by(pull_request_id=pull_request.id)
                .first()
            )
            if not bugzilla and bug_id and bug_url:
                bugzilla = BugzillaModel()
                bugzilla.bug_id = bug_id
                bugzilla.bug_url = bug_url
                bugzilla.pull_request_id = pull_request.id
                session.add(bugzilla)
            return bugzilla

    @classmethod
    def get_by_pr(
        cls,
        pr_id: int,
        namespace: str,
        repo_name: str,
        project_url: str,
    ) -> Optional["BugzillaModel"]:
        # Without bug_id/bug_url, get_or_create only looks up; it won't create.
        return cls.get_or_create(
            pr_id=pr_id,
            namespace=namespace,
            repo_name=repo_name,
            project_url=project_url,
        )

    def __repr__(self):
        return f"BugzillaModel(bug_id={self.bug_id}, bug_url={self.bug_url})"


class ProjectReleaseModel(Base):
    """A release (tag) of a tracked git project."""

    __tablename__ = "project_releases"
    id = Column(Integer, primary_key=True)  # our database PK
    tag_name = Column(String)
    commit_hash = Column(String)
    project_id = Column(Integer, ForeignKey("git_projects.id"))
    project = relationship("GitProjectModel", back_populates="releases")

    job_config_trigger_type = JobConfigTriggerType.release
    job_trigger_model_type = JobTriggerModelType.release

    @classmethod
    def get_or_create(
        cls,
        tag_name: str,
        namespace: str,
        repo_name: str,
        project_url: str,
        commit_hash: Optional[str] = None,
    ) -> "ProjectReleaseModel":
        """Return the existing release row or create it (creating the project row if needed)."""
        with get_sa_session() as session:
            project = GitProjectModel.get_or_create(
                namespace=namespace, repo_name=repo_name, project_url=project_url
            )
            project_release = (
                session.query(ProjectReleaseModel)
                .filter_by(tag_name=tag_name, project_id=project.id)
                .first()
            )
            if not project_release:
                project_release = ProjectReleaseModel()
                project_release.tag_name = tag_name
                project_release.project = project
                project_release.commit_hash = commit_hash
                session.add(project_release)
            return project_release

    @classmethod
    def get_by_id(cls, id_: int) -> Optional["ProjectReleaseModel"]:
        with get_sa_session() as session:
            return session.query(ProjectReleaseModel).filter_by(id=id_).first()

    def __repr__(self):
        return (
            f"ProjectReleaseModel("
            f"tag_name={self.tag_name}, "
            f"project={self.project})"
        )


# Any of the concrete trigger models a job can be attached to.
AbstractTriggerDbType = Union[
    PullRequestModel,
    ProjectReleaseModel,
    GitBranchModel,
    IssueModel,
]

# Maps a trigger-type enum value to the model class holding that trigger.
MODEL_FOR_TRIGGER: Dict[JobTriggerModelType, Type[AbstractTriggerDbType]] = {
    JobTriggerModelType.pull_request: PullRequestModel,
    JobTriggerModelType.branch_push: GitBranchModel,
    JobTriggerModelType.release: ProjectReleaseModel,
    JobTriggerModelType.issue: IssueModel,
}


class JobTriggerModel(Base):
    """
    Model representing a trigger of some packit task.

    It connects PipelineModel (and built/test models via that model)
    with models like PullRequestModel, GitBranchModel or ProjectReleaseModel.

    * It contains type and id of the other database_model.
      * We know table and id that we need to find in that table.
    * Each PipelineModel has to be connected to exactly one JobTriggerModel.
    * There can be multiple PipelineModels for one JobTriggerModel.
      (e.g. For each push to PR, there will be new PipelineModel, but same
      JobTriggerModel.)
""" __tablename__ = "job_triggers" id = Column(Integer, primary_key=True) # our database PK type = Column(Enum(JobTriggerModelType)) trigger_id = Column(Integer) runs = relationship("PipelineModel", back_populates="job_trigger") @classmethod def get_or_create( cls, type: JobTriggerModelType, trigger_id: int ) -> "JobTriggerModel": with get_sa_session() as session: trigger = ( session.query(JobTriggerModel) .filter_by(type=type, trigger_id=trigger_id) .first() ) if not trigger: trigger = JobTriggerModel() trigger.type = type trigger.trigger_id = trigger_id session.add(trigger) return trigger @classmethod def get_by_id(cls, id_: int) -> "JobTriggerModel": with get_sa_session() as session: return session.query(JobTriggerModel).filter_by(id=id_).first() def get_trigger_object(self) -> Optional[AbstractTriggerDbType]: with get_sa_session() as session: return ( session.query(MODEL_FOR_TRIGGER[self.type]) .filter_by(id=self.trigger_id) .first() ) def __repr__(self): return f"JobTriggerModel(type={self.type}, trigger_id={self.trigger_id})" class PipelineModel(Base): """ Represents one pipeline. Connects JobTriggerModel (and triggers like PullRequestModel via that model) with build/test models like SRPMBuildModel, CoprBuildTargetModel, KojiBuildTargetModel, and TFTTestRunTargetModel. * One model of each build/test model can be connected. * Each build/test model can be connected to multiple PipelineModels (e.g. on retrigger). * Each PipelineModel has to be connected to exactly one JobTriggerModel. * There can be multiple PipelineModels for one JobTriggerModel. (e.g. For each push to PR, there will be new PipelineModel, but same JobTriggerModel.) 
""" __tablename__ = "pipelines" id = Column(Integer, primary_key=True) # our database PK # datetime.utcnow instead of datetime.utcnow() because its an argument to the function # so it will run when the model is initiated, not when the table is made datetime = Column(DateTime, default=datetime.utcnow) job_trigger_id = Column(Integer, ForeignKey("job_triggers.id")) job_trigger = relationship("JobTriggerModel", back_populates="runs") srpm_build_id = Column(Integer, ForeignKey("srpm_builds.id")) srpm_build = relationship("SRPMBuildModel", back_populates="runs") copr_build_id = Column(Integer, ForeignKey("copr_build_targets.id")) copr_build = relationship("CoprBuildTargetModel", back_populates="runs") koji_build_id = Column(Integer, ForeignKey("koji_build_targets.id")) koji_build = relationship("KojiBuildTargetModel", back_populates="runs") test_run_id = Column(Integer, ForeignKey("tft_test_run_targets.id")) test_run = relationship("TFTTestRunTargetModel", back_populates="runs") propose_downstream_run_id = Column( Integer, ForeignKey("propose_downstream_runs.id") ) propose_downstream_run = relationship( "ProposeDownstreamModel", back_populates="runs" ) @classmethod def create(cls, type: JobTriggerModelType, trigger_id: int) -> "PipelineModel": with get_sa_session() as session: run_model = PipelineModel() run_model.job_trigger = JobTriggerModel.get_or_create( type=type, trigger_id=trigger_id ) session.add(run_model) return run_model def get_trigger_object(self) -> AbstractTriggerDbType: return self.job_trigger.get_trigger_object() def __repr__(self): return f"PipelineModel(id={self.id}, datetime='{datetime}', job_trigger={self.job_trigger})" @classmethod def __query_merged_runs(cls, session): return session.query( func.min(PipelineModel.id).label("merged_id"), PipelineModel.srpm_build_id, func.array_agg(psql_array([PipelineModel.copr_build_id])).label( "copr_build_id" ), func.array_agg(psql_array([PipelineModel.koji_build_id])).label( "koji_build_id" ), 
func.array_agg(psql_array([PipelineModel.test_run_id])).label( "test_run_id" ), func.array_agg(psql_array([PipelineModel.propose_downstream_run_id])).label( "propose_downstream_run_id", ), ) @classmethod def get_merged_chroots(cls, first: int, last: int) -> Iterable["PipelineModel"]: with get_sa_session() as session: return ( cls.__query_merged_runs(session) .group_by( PipelineModel.srpm_build_id, case( [(PipelineModel.srpm_build_id.isnot(null()), 0)], else_=PipelineModel.id, ), ) .order_by(desc("merged_id")) .slice(first, last) ) @classmethod def get_merged_run(cls, first_id: int) -> Optional[Iterable["PipelineModel"]]: with get_sa_session() as session: return ( cls.__query_merged_runs(session) .filter( PipelineModel.id >= first_id, PipelineModel.id <= first_id + 100 ) .group_by( PipelineModel.srpm_build_id, case( [(PipelineModel.srpm_build_id.isnot(null()), 0)], else_=PipelineModel.id, ), ) .first() ) @classmethod def get_run(cls, id_: int) -> Optional["PipelineModel"]: with get_sa_session() as session: return session.query(PipelineModel).filter_by(id=id_).first() class CoprBuildTargetModel(ProjectAndTriggersConnector, Base): """ Representation of Copr build for one target. """ __tablename__ = "copr_build_targets" id = Column(Integer, primary_key=True) build_id = Column(String, index=True) # copr build id # commit sha of the PR (or a branch, release) we used for a build commit_sha = Column(String) # what's the build status? 
    status = Column(String)
    # chroot, but we use the word target in our docs
    target = Column(String)
    # URL to copr web ui for the particular build
    web_url = Column(String)
    # url to copr build logs
    build_logs_url = Column(String)
    # for monitoring: time when we set the status about accepted task
    task_accepted_time = Column(DateTime)
    # datetime.utcnow instead of datetime.utcnow() because its an argument to the function
    # so it will run when the copr build is initiated, not when the table is made
    build_submitted_time = Column(DateTime, default=datetime.utcnow)
    build_start_time = Column(DateTime)
    build_finished_time = Column(DateTime)

    # project name as shown in copr
    project_name = Column(String)
    owner = Column(String)

    # metadata for the build which didn't make it to schema yet
    # metadata is reserved to sqlalch
    data = Column(JSON)

    # info about built packages we get from Copr, e.g.
    # [
    #   {
    #       "arch": "noarch",
    #       "epoch": 0,
    #       "name": "python3-packit",
    #       "release": "1.20210930124525726166.main.0.g0b7b36b.fc36",
    #       "version": "0.38.0",
    #   }
    # ]
    built_packages = Column(JSON)

    runs = relationship("PipelineModel", back_populates="copr_build")

    def set_built_packages(self, built_packages):
        with get_sa_session() as session:
            self.built_packages = built_packages
            session.add(self)

    def set_start_time(self, start_time: datetime):
        with get_sa_session() as session:
            self.build_start_time = start_time
            session.add(self)

    def set_end_time(self, end_time: datetime):
        with get_sa_session() as session:
            self.build_finished_time = end_time
            session.add(self)

    def set_status(self, status: str):
        with get_sa_session() as session:
            self.status = status
            session.add(self)

    def set_build_logs_url(self, build_logs: str):
        with get_sa_session() as session:
            self.build_logs_url = build_logs
            session.add(self)

    def get_srpm_build(self) -> Optional["SRPMBuildModel"]:
        """Return the SRPM build this copr build was made from, via any run."""
        if not self.runs:
            return None
        # All SRPMBuild models for all the runs have to be same.
        return self.runs[0].srpm_build

    @classmethod
    def get_by_id(cls, id_: int) -> Optional["CoprBuildTargetModel"]:
        with get_sa_session() as session:
            return session.query(CoprBuildTargetModel).filter_by(id=id_).first()

    @classmethod
    def get_all(cls) -> Optional[Iterable["CoprBuildTargetModel"]]:
        with get_sa_session() as session:
            return (
                session.query(CoprBuildTargetModel)
                .order_by(desc(CoprBuildTargetModel.id))
                .all()
            )

    @classmethod
    def get_merged_chroots(
        cls, first: int, last: int
    ) -> Iterable["CoprBuildTargetModel"]:
        """Returns a list of unique build ids with merged status, chroots
        Details:
        https://github.com/packit/packit-service/pull/674#discussion_r439819852
        """
        with get_sa_session() as session:
            return (
                session.query(
                    # We need something to order our merged builds by,
                    # so set new_id to be min(ids of to-be-merged rows)
                    func.min(CoprBuildTargetModel.id).label("new_id"),
                    # Select identical element(s)
                    CoprBuildTargetModel.build_id,
                    # Merge chroots and statuses from different rows into one
                    func.array_agg(psql_array([CoprBuildTargetModel.target])).label(
                        "target"
                    ),
                    func.array_agg(psql_array([CoprBuildTargetModel.status])).label(
                        "status"
                    ),
                    func.array_agg(psql_array([CoprBuildTargetModel.id])).label(
                        "packit_id_per_chroot"
                    ),
                )
                .group_by(
                    CoprBuildTargetModel.build_id
                )  # Group by identical element(s)
                .order_by(desc("new_id"))
                .slice(first, last)
            )

    # Returns all builds with that build_id, irrespective of target
    @classmethod
    def get_all_by_build_id(
        cls, build_id: Union[str, int]
    ) -> Optional[Iterable["CoprBuildTargetModel"]]:
        if isinstance(build_id, int):
            # See the comment in get_by_build_id()
            build_id = str(build_id)
        with get_sa_session() as session:
            return session.query(CoprBuildTargetModel).filter_by(build_id=build_id)

    @classmethod
    def get_all_by_status(
        cls, status: str
    ) -> Optional[Iterable["CoprBuildTargetModel"]]:
        """Returns all builds which currently have the given status."""
        with get_sa_session() as session:
            return session.query(CoprBuildTargetModel).filter_by(status=status)

    # returns the build matching the build_id and the target
    @classmethod
    def get_by_build_id(
        cls, build_id: Union[str, int], target: str = None
    ) -> Optional["CoprBuildTargetModel"]:
        if isinstance(build_id, int):
            # PG is pesky about this:
            # LINE 3: WHERE copr_builds.build_id = 1245767 AND copr_builds.target ...
            # HINT: No operator matches the given name and argument type(s).
            # You might need to add explicit type casts.
            build_id = str(build_id)
        with get_sa_session() as session:
            query = session.query(CoprBuildTargetModel).filter_by(build_id=build_id)
            if target:
                query = query.filter_by(target=target)
            return query.first()

    @staticmethod
    def get_all_by(
        project_name: str,
        commit_sha: str,
        owner: str = None,
        target: str = None,
    ) -> Optional[Iterable["CoprBuildTargetModel"]]:
        """
        All owner/project_name builds sorted from latest to oldest
        with the given commit_sha and optional target.
        """
        # Turn the provided (non-None) arguments straight into filter_by kwargs.
        non_none_args = {
            arg: value for arg, value in locals().items() if value is not None
        }
        with get_sa_session() as session:
            query = (
                session.query(CoprBuildTargetModel)
                .filter_by(**non_none_args)
                .order_by(CoprBuildTargetModel.build_id.desc())
            )
            return query.all()

    @classmethod
    def get_all_by_commit(
        cls, commit_sha: str
    ) -> Optional[Iterable["CoprBuildTargetModel"]]:
        """Returns all builds that match a given commit sha"""
        with get_sa_session() as session:
            query = session.query(CoprBuildTargetModel).filter_by(commit_sha=commit_sha)
            return query.all()

    @classmethod
    def create(
        cls,
        build_id: str,
        commit_sha: str,
        project_name: str,
        owner: str,
        web_url: str,
        target: str,
        status: str,
        run_model: "PipelineModel",
        task_accepted_time: Optional[datetime] = None,
    ) -> "CoprBuildTargetModel":
        """Create a build row and attach it to run_model (cloning the run when occupied)."""
        with get_sa_session() as session:
            build = cls()
            build.build_id = build_id
            build.status = status
            build.project_name = project_name
            build.owner = owner
            build.commit_sha = commit_sha
            build.web_url = web_url
            build.target = target
            build.task_accepted_time = task_accepted_time
            session.add(build)

            if run_model.copr_build:
                # Clone run model
                new_run_model = PipelineModel.create(
                    type=run_model.job_trigger.type,
                    trigger_id=run_model.job_trigger.trigger_id,
                )
                new_run_model.srpm_build = run_model.srpm_build
                new_run_model.copr_build = build
                session.add(new_run_model)
            else:
                run_model.copr_build = build
                session.add(run_model)

            return build

    @classmethod
    def get(
        cls,
        build_id: str,
        target: str,
    ) -> "CoprBuildTargetModel":
        return cls.get_by_build_id(build_id, target)

    def __repr__(self):
        return f"COPRBuildModel(id={self.id}, build_submitted_time={self.build_submitted_time})"


class KojiBuildTargetModel(ProjectAndTriggersConnector, Base):
    """we create an entry for every target"""

    __tablename__ = "koji_build_targets"
    id = Column(Integer, primary_key=True)
    build_id = Column(String, index=True)  # koji build id
    # commit sha of the PR (or a branch, release) we used for a build
    commit_sha = Column(String)
    # what's the build status?
    status = Column(String)
    # chroot, but we use the word target in our docs
    target = Column(String)
    # URL to koji web ui for the particular build
    web_url = Column(String)
    # url to koji build logs
    build_logs_url = Column(String)
    # datetime.utcnow instead of datetime.utcnow() because its an argument to the function
    # so it will run when the koji build is initiated, not when the table is made
    build_submitted_time = Column(DateTime, default=datetime.utcnow)
    build_start_time = Column(DateTime)
    build_finished_time = Column(DateTime)
    # metadata for the build which didn't make it to schema yet
    # metadata is reserved to sqlalch
    data = Column(JSON)
    # it is a scratch build?
    scratch = Column(Boolean)

    runs = relationship("PipelineModel", back_populates="koji_build")

    def set_status(self, status: str):
        with get_sa_session() as session:
            self.status = status
            session.add(self)

    def set_build_logs_url(self, build_logs: str):
        with get_sa_session() as session:
            self.build_logs_url = build_logs
            session.add(self)

    def set_web_url(self, web_url: str):
        with get_sa_session() as session:
            self.web_url = web_url
            session.add(self)

    def set_build_start_time(self, build_start_time: Optional[DateTime]):
        with get_sa_session() as session:
            self.build_start_time = build_start_time
            session.add(self)

    def set_build_finished_time(self, build_finished_time: Optional[DateTime]):
        with get_sa_session() as session:
            self.build_finished_time = build_finished_time
            session.add(self)

    def set_build_submitted_time(self, build_submitted_time: Optional[DateTime]):
        with get_sa_session() as session:
            self.build_submitted_time = build_submitted_time
            session.add(self)

    def set_scratch(self, value: bool):
        with get_sa_session() as session:
            self.scratch = value
            session.add(self)

    def get_srpm_build(self) -> Optional["SRPMBuildModel"]:
        """Return the SRPM build this koji build was made from, via any run."""
        if not self.runs:
            return None
        # All SRPMBuild models for all the runs have to be same.
        return self.runs[0].srpm_build

    @classmethod
    def get_by_id(cls, id_: int) -> Optional["KojiBuildTargetModel"]:
        with get_sa_session() as session:
            return session.query(KojiBuildTargetModel).filter_by(id=id_).first()

    @classmethod
    def get_all(cls) -> Optional[Iterable["KojiBuildTargetModel"]]:
        with get_sa_session() as session:
            return session.query(KojiBuildTargetModel).all()

    @classmethod
    def get_range(cls, first: int, last: int) -> Iterable["KojiBuildTargetModel"]:
        """Return builds newest first, sliced to [first, last)."""
        with get_sa_session() as session:
            return (
                session.query(KojiBuildTargetModel)
                .order_by(desc(KojiBuildTargetModel.id))
                .slice(first, last)
            )

    # Returns all builds with that build_id, irrespective of target
    @classmethod
    def get_all_by_build_id(
        cls, build_id: Union[str, int]
    ) -> Optional[Iterable["KojiBuildTargetModel"]]:
        if isinstance(build_id, int):
            # See the comment in get_by_build_id()
            build_id = str(build_id)
        with get_sa_session() as session:
            return session.query(KojiBuildTargetModel).filter_by(build_id=build_id)

    @classmethod
    def get_by_build_id(
        cls, build_id: Union[str, int], target: Optional[str] = None
    ) -> Optional["KojiBuildTargetModel"]:
        """
        Returns the build matching the build_id and the target.
        """
        if isinstance(build_id, int):
            # PG is pesky about this:
            # LINE 3: WHERE koji_builds.build_id = 1245767 AND koji_builds.target ...
            # HINT: No operator matches the given name and argument type(s).
            # You might need to add explicit type casts.
            build_id = str(build_id)
        with get_sa_session() as session:
            if target:
                return (
                    session.query(KojiBuildTargetModel)
                    .filter_by(build_id=build_id, target=target)
                    .first()
                )
            return (
                session.query(KojiBuildTargetModel).filter_by(build_id=build_id).first()
            )

    @classmethod
    def create(
        cls,
        build_id: str,
        commit_sha: str,
        web_url: str,
        target: str,
        status: str,
        scratch: bool,
        run_model: "PipelineModel",
    ) -> "KojiBuildTargetModel":
        """Create a build row and attach it to run_model (cloning the run when occupied)."""
        with get_sa_session() as session:
            build = cls()
            build.build_id = build_id
            build.status = status
            build.commit_sha = commit_sha
            build.web_url = web_url
            build.target = target
            build.scratch = scratch
            session.add(build)

            if run_model.koji_build:
                # Clone run model
                new_run_model = PipelineModel.create(
                    type=run_model.job_trigger.type,
                    trigger_id=run_model.job_trigger.trigger_id,
                )
                new_run_model.srpm_build = run_model.srpm_build
                new_run_model.koji_build = build
                session.add(new_run_model)
            else:
                run_model.koji_build = build
                session.add(run_model)

            return build

    @classmethod
    def get(
        cls,
        build_id: str,
        target: str,
    ) -> Optional["KojiBuildTargetModel"]:
        return cls.get_by_build_id(build_id, target)

    def __repr__(self):
        return (
            f"KojiBuildTargetModel(id={self.id}, "
            f"build_submitted_time={self.build_submitted_time})"
        )


class SRPMBuildModel(ProjectAndTriggersConnector, Base):
    """An SRPM build; each build/test pipeline starts from one of these."""

    __tablename__ = "srpm_builds"
    id = Column(Integer, primary_key=True)
    status = Column(String)
    # our logs we want to show to the user
    logs = Column(Text)
    build_submitted_time = Column(DateTime, default=datetime.utcnow)
    build_start_time = Column(DateTime)
    build_finished_time = Column(DateTime)
    commit_sha = Column(String)
    # url for downloading the SRPM
    url = Column(Text)
    # attributes for SRPM built by Copr
    logs_url = Column(Text)
    copr_build_id = Column(String, index=True)
    copr_web_url = Column(Text)

    runs = relationship("PipelineModel", back_populates="srpm_build")

    @classmethod
    def create_with_new_run(
        cls,
        trigger_model: AbstractTriggerDbType,
        commit_sha: str,
        copr_build_id: Optional[str] =
None,
        copr_web_url: Optional[str] = None,
    ) -> Tuple["SRPMBuildModel", "PipelineModel"]:
        """
        Create a new model for SRPM and connect it to the PipelineModel.

        * New SRPMBuildModel model will have connection to a new PipelineModel.
        * The newly created PipelineModel can reuse existing JobTriggerModel
          (e.g.: one pull-request can have multiple runs).

        More specifically:
        * On PR creation:
          -> SRPMBuildModel is created.
          -> New PipelineModel is created.
          -> JobTriggerModel is created.
        * On `/packit build` comment or new push:
          -> SRPMBuildModel is created.
          -> New PipelineModel is created.
          -> JobTriggerModel is reused.
        * On `/packit test` comment:
          -> SRPMBuildModel and CoprBuildTargetModel are reused.
          -> New TFTTestRunTargetModel is created.
          -> New PipelineModel is created and collects this new TFTTestRunTargetModel
             with old SRPMBuildModel and CoprBuildTargetModel.
        """
        with get_sa_session() as session:
            srpm_build = cls()
            srpm_build.status = "pending"
            srpm_build.commit_sha = commit_sha
            srpm_build.copr_build_id = copr_build_id
            srpm_build.copr_web_url = copr_web_url
            session.add(srpm_build)

            # Create a new run model, reuse trigger_model if it exists:
            new_run_model = PipelineModel.create(
                type=trigger_model.job_trigger_model_type, trigger_id=trigger_model.id
            )
            new_run_model.srpm_build = srpm_build
            session.add(new_run_model)

            return srpm_build, new_run_model

    @classmethod
    def get_by_id(
        cls,
        id_: int,
    ) -> Optional["SRPMBuildModel"]:
        with get_sa_session() as session:
            return session.query(SRPMBuildModel).filter_by(id=id_).first()

    @classmethod
    def get(cls, first: int, last: int) -> Iterable["SRPMBuildModel"]:
        """Return SRPM builds newest first, sliced to [first, last)."""
        with get_sa_session() as session:
            return (
                session.query(SRPMBuildModel)
                .order_by(desc(SRPMBuildModel.id))
                .slice(first, last)
            )

    @classmethod
    def get_by_copr_build_id(
        cls, copr_build_id: Union[str, int]
    ) -> Optional["SRPMBuildModel"]:
        # The column is a String; normalize int ids before filtering.
        if isinstance(copr_build_id, int):
            copr_build_id = str(copr_build_id)
        with get_sa_session() as session:
            return (
                session.query(SRPMBuildModel)
                .filter_by(copr_build_id=copr_build_id)
                .first()
            )

    @classmethod
    def get_older_than(cls, delta: timedelta) -> Iterable["SRPMBuildModel"]:
        """Return builds older than delta, whose logs/artifacts haven't been discarded yet."""
        delta_ago = datetime.utcnow() - delta
        with get_sa_session() as session:
            return session.query(SRPMBuildModel).filter(
                SRPMBuildModel.build_submitted_time < delta_ago,
                SRPMBuildModel.logs.isnot(None),
            )

    def set_url(self, url: Optional[str]) -> None:
        with get_sa_session() as session:
            # Store SQL NULL explicitly when the value is cleared.
            self.url = null() if url is None else url
            session.add(self)

    def set_logs(self, logs: Optional[str]) -> None:
        with get_sa_session() as session:
            self.logs = null() if logs is None else logs
            session.add(self)

    def set_start_time(self, start_time: datetime) -> None:
        with get_sa_session() as session:
            self.build_start_time = start_time
            session.add(self)

    def set_end_time(self, end_time: datetime) -> None:
        with get_sa_session() as session:
            self.build_finished_time = end_time
            session.add(self)

    def set_build_logs_url(self, logs_url: str) -> None:
        with get_sa_session() as session:
            self.logs_url = logs_url
            session.add(self)

    def set_status(self, status: str) -> None:
        with get_sa_session() as session:
            self.status = status
            session.add(self)

    def __repr__(self):
        return f"SRPMBuildModel(id={self.id}, build_submitted_time={self.build_submitted_time})"


class AllowlistStatus(str, enum.Enum):
    # Values come from the shared ALLOWLIST_CONSTANTS mapping.
    approved_automatically = ALLOWLIST_CONSTANTS["approved_automatically"]
    waiting = ALLOWLIST_CONSTANTS["waiting"]
    approved_manually = ALLOWLIST_CONSTANTS["approved_manually"]
    denied = ALLOWLIST_CONSTANTS["denied"]


class AllowlistModel(Base):
    """Approval status of a namespace (or repository) for using the service."""

    __tablename__ = "allowlist"
    id = Column(Integer, primary_key=True)
    namespace = Column(String, index=True)  # renamed from account_name
    status = Column(Enum(AllowlistStatus))
    fas_account = Column(String)

    @classmethod
    def add_namespace(
        cls, namespace: str, status: str, fas_account: Optional[str] = None
    ):
        """
        Adds namespace with specific status to the allowlist.
If namespace is present, just changes the status. Args: namespace (str): Namespace to be added. Can be `github.com/namespace` or specific repository `github.com/namespace/repository.git`. status (str): Status to be set. AllowlistStatus enumeration as string. fas_account (Optional[str]): FAS login, in case the namespace was automatically approved through the FAS login of user that installed GitHub App. Defaults to `None`. Returns: Newly created entry or entry that represents requested namespace. """ with get_sa_session() as session: namespace_entry = cls.get_namespace(namespace) if not namespace_entry: namespace_entry = cls() namespace_entry.namespace = namespace namespace_entry.status = status if fas_account: namespace_entry.fas_account = fas_account session.add(namespace_entry) return namespace_entry @classmethod def get_namespace(cls, namespace: str) -> Optional["AllowlistModel"]: """ Retrieves namespace from the allowlist. Args: namespace (str): Namespace to be added. Can be `github.com/namespace` or specific repository `github.com/namespace/repository.git`. Returns: Entry that represents namespace or `None` if cannot be found. """ with get_sa_session() as session: return session.query(AllowlistModel).filter_by(namespace=namespace).first() @classmethod def get_namespaces_by_status( cls, status: str ) -> Optional[Iterable["AllowlistModel"]]: """ Get list of namespaces with specific status. Args: status (str): Status of the namespaces. AllowlistStatus enumeration as string. Returns: List of the namespaces with set status. 
""" with get_sa_session() as session: return session.query(AllowlistModel).filter_by(status=status) @classmethod def remove_namespace(cls, namespace: str) -> Optional["AllowlistModel"]: with get_sa_session() as session: namespace_entry = session.query(AllowlistModel).filter_by( namespace=namespace ) if namespace_entry: namespace_entry.delete() return namespace_entry @classmethod def get_all(cls) -> Optional[Iterable["AllowlistModel"]]: with get_sa_session() as session: return session.query(AllowlistModel).all() def to_dict(self) -> Dict[str, str]: return { "namespace": self.namespace, "status": self.status, "fas_account": self.fas_account, } def __repr__(self): return ( f'<AllowlistModel(namespace="{self.namespace}", ' f'status="{self.status}", ' f'fas_account="{self.fas_account}")>' ) class TestingFarmResult(str, enum.Enum): new = "new" queued = "queued" running = "running" passed = "passed" failed = "failed" skipped = "skipped" error = "error" unknown = "unknown" needs_inspection = "needs_inspection" class TFTTestRunTargetModel(ProjectAndTriggersConnector, Base): __tablename__ = "tft_test_run_targets" id = Column(Integer, primary_key=True) pipeline_id = Column(String, index=True) commit_sha = Column(String) status = Column(Enum(TestingFarmResult)) target = Column(String) web_url = Column(String) # datetime.utcnow instead of datetime.utcnow() because its an argument to the function # so it will run when the model is initiated, not when the table is made submitted_time = Column(DateTime, default=datetime.utcnow) data = Column(JSON) runs = relationship("PipelineModel", back_populates="test_run") def set_status(self, status: TestingFarmResult, created: Optional[DateTime] = None): """ set status of the TF run and optionally set the created datetime as well """ with get_sa_session() as session: self.status = status if created and not self.submitted_time: self.submitted_time = created session.add(self) def set_web_url(self, web_url: str): with get_sa_session() as 
session: self.web_url = web_url session.add(self) @classmethod def create( cls, pipeline_id: str, commit_sha: str, status: TestingFarmResult, target: str, run_model: "PipelineModel", web_url: Optional[str] = None, data: dict = None, ) -> "TFTTestRunTargetModel": with get_sa_session() as session: test_run = cls() test_run.pipeline_id = pipeline_id test_run.commit_sha = commit_sha test_run.status = status test_run.target = target test_run.web_url = web_url test_run.data = data session.add(test_run) if run_model.test_run: # Clone run model new_run_model = PipelineModel.create( type=run_model.job_trigger.type, trigger_id=run_model.job_trigger.trigger_id, ) new_run_model.srpm_build = run_model.srpm_build new_run_model.copr_build = run_model.copr_build new_run_model.test_run = test_run session.add(new_run_model) else: run_model.test_run = test_run session.add(run_model) return test_run @classmethod def get_by_pipeline_id(cls, pipeline_id: str) -> Optional["TFTTestRunTargetModel"]: with get_sa_session() as session: return ( session.query(TFTTestRunTargetModel) .filter_by(pipeline_id=pipeline_id) .first() ) @classmethod def get_all_by_status( cls, *status: TestingFarmResult ) -> Optional[Iterable["TFTTestRunTargetModel"]]: """Returns all runs which currently have their status set to one of the requested statuses.""" with get_sa_session() as session: return session.query(TFTTestRunTargetModel).filter( TFTTestRunTargetModel.status.in_(status) ) @classmethod def get_by_id(cls, id: int) -> Optional["TFTTestRunTargetModel"]: with get_sa_session() as session: return session.query(TFTTestRunTargetModel).filter_by(id=id).first() @staticmethod def get_all_by_commit_target( commit_sha: str, target: str = None, ) -> Optional[Iterable["TFTTestRunTargetModel"]]: """ All tests with the given commit_sha and optional target. 
""" non_none_args = { arg: value for arg, value in locals().items() if value is not None } with get_sa_session() as session: query = session.query(TFTTestRunTargetModel).filter_by(**non_none_args) return query.all() @classmethod def get_range( cls, first: int, last: int ) -> Optional[Iterable["TFTTestRunTargetModel"]]: with get_sa_session() as session: return ( session.query(TFTTestRunTargetModel) .order_by(desc(TFTTestRunTargetModel.id)) .slice(first, last) ) def __repr__(self): return f"TFTTestRunTargetModel(id={self.id}, pipeline_id={self.pipeline_id})" class ProposeDownstreamTargetStatus(str, enum.Enum): queued = "queued" running = "running" error = "error" retry = "retry" submitted = "submitted" class ProposeDownstreamTargetModel(ProjectAndTriggersConnector, Base): __tablename__ = "propose_downstream_run_targets" id = Column(Integer, primary_key=True) branch = Column(String, default="unknown") downstream_pr_url = Column(String) status = Column(Enum(ProposeDownstreamTargetStatus)) submitted_time = Column(DateTime, default=datetime.utcnow) start_time = Column(DateTime) finished_time = Column(DateTime) logs = Column(Text) propose_downstream_id = Column(Integer, ForeignKey("propose_downstream_runs.id")) propose_downstream = relationship( "ProposeDownstreamModel", back_populates="propose_downstream_targets" ) def __repr__(self) -> str: return f"ProposeDownstreamTargetModel(id={self.id})" @classmethod def create( cls, status: ProposeDownstreamTargetStatus, ) -> "ProposeDownstreamTargetModel": with get_sa_session() as session: downstream_pr = cls() downstream_pr.status = status session.add(downstream_pr) return downstream_pr def set_status(self, status: ProposeDownstreamTargetStatus) -> None: with get_sa_session() as session: self.status = status session.add(self) def set_downstream_pr_url(self, downstream_pr_url: str) -> None: with get_sa_session() as session: self.downstream_pr_url = downstream_pr_url session.add(self) def set_start_time(self, start_time: DateTime) 
-> None: with get_sa_session() as session: self.start_time = start_time session.add(self) def set_finished_time(self, finished_time: DateTime) -> None: with get_sa_session() as session: self.finished_time = finished_time session.add(self) def set_branch(self, branch: str) -> None: with get_sa_session() as session: self.branch = branch session.add(self) def set_logs(self, logs: str) -> None: with get_sa_session() as session: self.logs = logs session.add(self) @classmethod def get_by_id(cls, id_: int) -> Optional["ProposeDownstreamTargetModel"]: with get_sa_session() as session: return session.query(ProposeDownstreamTargetModel).filter_by(id=id_).first() class ProposeDownstreamStatus(str, enum.Enum): running = "running" finished = "finished" error = "error" class ProposeDownstreamModel(ProjectAndTriggersConnector, Base): __tablename__ = "propose_downstream_runs" id = Column(Integer, primary_key=True) status = Column(Enum(ProposeDownstreamStatus)) submitted_time = Column(DateTime, default=datetime.utcnow) runs = relationship("PipelineModel", back_populates="propose_downstream_run") propose_downstream_targets = relationship( "ProposeDownstreamTargetModel", back_populates="propose_downstream" ) def __repr__(self) -> str: return f"ProposeDownstreamModel(id={self.id}, submitted_time={self.submitted_time})" @classmethod def create_with_new_run( cls, status: ProposeDownstreamStatus, trigger_model: AbstractTriggerDbType, ) -> Tuple["ProposeDownstreamModel", "PipelineModel"]: """ Create a new model for ProposeDownstream and connect it to the PipelineModel. * New ProposeDownstreamModel model will have connection to a new PipelineModel. * The newly created PipelineModel can reuse existing JobTriggerModel (e.g.: one IssueModel can have multiple runs). More specifically: * On `/packit propose-downstream` issue comment: -> ProposeDownstreamModel is created. -> New PipelineModel is created. -> JobTriggerModel is created. 
* Something went wrong, after correction and another `/packit propose-downstream` comment: -> ProposeDownstreamModel is created. -> PipelineModel is created. -> JobTriggerModel is reused. * TODO: we will use propose-downstream in commit-checks - fill in once it's implemented """ with get_sa_session() as session: propose_downstream = cls() propose_downstream.status = status session.add(propose_downstream) # Create a pipeline, reuse trigger_model if it exists: pipeline = PipelineModel.create( type=trigger_model.job_trigger_model_type, trigger_id=trigger_model.id ) pipeline.propose_downstream_run = propose_downstream session.add(pipeline) return propose_downstream, pipeline def set_status(self, status: ProposeDownstreamStatus) -> None: with get_sa_session() as session: self.status = status session.add(self) @classmethod def get_by_id(cls, id_: int) -> Optional["ProposeDownstreamModel"]: with get_sa_session() as session: return session.query(ProposeDownstreamModel).filter_by(id=id_).first() @classmethod def get_all_by_status( cls, status: str ) -> Optional[Iterable["ProposeDownstreamModel"]]: with get_sa_session() as session: return session.query(ProposeDownstreamModel).filter_by(status=status) @classmethod def get_range(cls, first: int, last: int) -> Iterable["ProposeDownstreamModel"]: with get_sa_session() as session: return ( session.query(ProposeDownstreamModel) .order_by(desc(ProposeDownstreamModel.id)) .slice(first, last) ) AbstractBuildTestDbType = Union[ CoprBuildTargetModel, KojiBuildTargetModel, SRPMBuildModel, TFTTestRunTargetModel, ProposeDownstreamModel, ] class ProjectAuthenticationIssueModel(Base): __tablename__ = "project_authentication_issue" id = Column(Integer, primary_key=True) project = relationship( "GitProjectModel", back_populates="project_authentication_issue" ) # Check to know if we created a issue for the repo. 
issue_created = Column(Boolean) project_id = Column(Integer, ForeignKey("git_projects.id")) @classmethod def get_project( cls, namespace: str, repo_name: str, project_url: str ) -> Optional["ProjectAuthenticationIssueModel"]: with get_sa_session() as session: project = GitProjectModel.get_or_create( namespace=namespace, repo_name=repo_name, project_url=project_url ) return ( session.query(ProjectAuthenticationIssueModel) .filter_by(project_id=project.id) .first() ) @classmethod def create( cls, namespace: str, repo_name: str, project_url: str, issue_created: bool ) -> "ProjectAuthenticationIssueModel": with get_sa_session() as session: project = GitProjectModel.get_or_create( namespace=namespace, repo_name=repo_name, project_url=project_url ) project_authentication_issue = cls() project_authentication_issue.issue_created = issue_created project_authentication_issue.project_id = project.id session.add(project_authentication_issue) return project_authentication_issue def __repr__(self): return ( f"ProjectAuthenticationIssueModel(project={self.project}, " f"issue_created={self.issue_created})" ) class GithubInstallationModel(Base): __tablename__ = "github_installations" id = Column(Integer, primary_key=True, autoincrement=True) # information about account (user/organization) into which the app has been installed account_login = Column(String) account_id = Column(Integer) account_url = Column(String) account_type = Column(String) # information about user who installed the app into 'account' sender_id = Column(Integer) sender_login = Column(String) created_at = Column(DateTime, default=datetime.utcnow) repositories = Column(ARRAY(Integer, ForeignKey("git_projects.id"))) @classmethod def get_project(cls, repository: str): namespace, repo_name = repository.split("/") return GitProjectModel.get_or_create( namespace=namespace, repo_name=repo_name, project_url=f"https://github.com/{namespace}/{repo_name}", ) @classmethod def get_by_id(cls, id: int) -> 
Optional["GithubInstallationModel"]: with get_sa_session() as session: return session.query(GithubInstallationModel).filter_by(id=id).first() @classmethod def get_by_account_login( cls, account_login: str ) -> Optional["GithubInstallationModel"]: with get_sa_session() as session: return ( session.query(GithubInstallationModel) .filter_by(account_login=account_login) .first() ) @classmethod def get_all(cls) -> Optional[Iterable["GithubInstallationModel"]]: with get_sa_session() as session: return session.query(GithubInstallationModel).all() @classmethod def create(cls, event): with get_sa_session() as session: installation = cls.get_by_account_login(event.account_login) if not installation: installation = cls() installation.account_login = event.account_login installation.account_id = event.account_id installation.account_url = event.account_url installation.account_type = event.account_type installation.sender_login = event.sender_login installation.sender_id = event.sender_id installation.created_at = event.created_at installation.repositories = [ cls.get_project(repo).id for repo in event.repositories ] session.add(installation) return installation def to_dict(self): return { "account_login": self.account_login, "account_id": self.account_id, "account_type": self.account_type, "account_url": self.account_url, "sender_login": self.sender_login, "sender_id": self.sender_id, # Inconsistent with other API endpoints, kept for readability for # internal use, if necessary "created_at": optional_time(self.created_at), } def __repr__(self): return f"GithubInstallationModel(id={self.id}, account={self.account_login})" class SourceGitPRDistGitPRModel(Base): __tablename__ = "source_git_pr_dist_git_pr" id = Column(Integer, primary_key=True) # our database PK source_git_pull_request_id = Column( Integer, ForeignKey("pull_requests.id"), unique=True, index=True ) dist_git_pull_request_id = Column( Integer, ForeignKey("pull_requests.id"), unique=True, index=True ) 
source_git_pull_request = relationship( "PullRequestModel", primaryjoin="SourceGitPRDistGitPRModel.source_git_pull_request_id==PullRequestModel.id", uselist=False, ) dist_git_pull_request = relationship( "PullRequestModel", primaryjoin="SourceGitPRDistGitPRModel.dist_git_pull_request_id==PullRequestModel.id", uselist=False, ) @classmethod def get_or_create( cls, source_git_pr_id: int, source_git_namespace: str, source_git_repo_name: str, source_git_project_url: str, dist_git_pr_id: int, dist_git_namespace: str, dist_git_repo_name: str, dist_git_project_url: str, ) -> "SourceGitPRDistGitPRModel": with get_sa_session() as session: source_git_pull_request = PullRequestModel.get_or_create( pr_id=source_git_pr_id, namespace=source_git_namespace, repo_name=source_git_repo_name, project_url=source_git_project_url, ) dist_git_pull_request = PullRequestModel.get_or_create( pr_id=dist_git_pr_id, namespace=dist_git_namespace, repo_name=dist_git_repo_name, project_url=dist_git_project_url, ) rel = ( session.query(SourceGitPRDistGitPRModel) .filter_by(source_git_pull_request_id=source_git_pull_request.id) .filter_by(dist_git_pull_request_id=dist_git_pull_request.id) .one_or_none() ) if not rel: rel = SourceGitPRDistGitPRModel() rel.source_git_pull_request_id = source_git_pull_request.id rel.dist_git_pull_request_id = dist_git_pull_request.id session.add(rel) return rel @classmethod def get_by_id(cls, id_: int) -> Optional["SourceGitPRDistGitPRModel"]: with get_sa_session() as session: return ( session.query(SourceGitPRDistGitPRModel).filter_by(id=id_).one_or_none() ) @classmethod def get_by_source_git_id(cls, id_: int) -> Optional["SourceGitPRDistGitPRModel"]: with get_sa_session() as session: return ( session.query(SourceGitPRDistGitPRModel) .filter_by(source_git_pull_request_id=id_) .one_or_none() ) @classmethod def get_by_dist_git_id(cls, id_: int) -> Optional["SourceGitPRDistGitPRModel"]: with get_sa_session() as session: return ( session.query(SourceGitPRDistGitPRModel) 
.filter_by(dist_git_pull_request_id=id_) .one_or_none() )
from django.test import TestCase
from rest_framework.test import APITestCase
from coodesh_app.models import SFNArticles, SFNArticlesLaunches
import json
from datetime import datetime
from coodesh_app.management.commands.load_api_data import DATETIME_FORMAT

# Create your tests here.


class SFNArticlesCreateTestCase(APITestCase):
    """Tests for POST /articles/ (record creation)."""

    # Old-route test kept disabled, verbatim, inside a string literal.
    '''
    def test_create_record(self):
        initial_sfnarticles_count = SFNArticles.objects.count()
        # print("initial_sfnarticles_count", initial_sfnarticles_count)
        new_sfnarticle_data = {
            "api_id": 1,
            "title": "EMPTY",
            "url": "EMPTY",
            "imageUrl": "EMPTY",
            "newsSite": "EMPTY",
            "summary": "EMPTY",
            "updatedAt": "2022-02-13T13:34:02",
            "publishedAt": "2022-02-13T13:34:02",
            "featured": True,
            "event_id": "EMPTY",
            "event_provider": "EMPTY",
            "launche_id": "EMPTY",
            "launche_provider": "EMPTY",
        }
        response = self.client.post('http://127.0.0.1:8000/articles/', data=new_sfnarticle_data)
        if response.status_code != 200:
            print(response.data)
        new_sfnarticles_count = SFNArticles.objects.count()
        self.assertEqual(initial_sfnarticles_count + 1, new_sfnarticles_count, msg=f"response.data {response.data}")
        # print("new_sfnarticles_count", new_sfnarticles_count)
        for k, v in response.data.items():
            if k != 'events' and k != 'lauches' and k != 'updatedAt' and k != 'publishedAt':
                self.assertEqual(new_sfnarticle_data[k], v, msg=f"new_sfnarticle_data[k] {new_sfnarticle_data[k]}, v {v}")
            elif k == 'updatedAt' or k == 'publishedAt':
                str_v = v.strftime(DATETIME_FORMAT)
                # print(f"str_v {str_v}")
                self.assertEqual(new_sfnarticle_data[k], str_v, msg=f"new_sfnarticle_data[k] {new_sfnarticle_data[k]}, str_v {str_v}")
            elif k == 'events':
                # print(v)
                for eventKey, eventValue in v[0].items():
                    if eventKey == 'article_event_id':
                        self.assertEqual(new_sfnarticle_data["event_id"], eventValue, msg=f"new_sfnarticle_data['article_event_id'] {new_sfnarticle_data['event_id']}, eventValue {eventValue}")
                    else:
                        self.assertEqual(new_sfnarticle_data['event_provider'], eventValue, msg=f"new_sfnarticle_data['event_provider'] {new_sfnarticle_data['event_provider']}, eventValue {eventValue}")
            elif k == 'lauches':
                for lauchesKey, lauchesValue in v[0].items():
                    if lauchesKey == 'article_launche_id':
                        self.assertEqual(new_sfnarticle_data["launche_id"], lauchesValue, msg=f"new_sfnarticle_data['article_launche_id'] {new_sfnarticle_data['launche_id']}, lauchesValue {lauchesValue}")
                    else:
                        self.assertEqual(new_sfnarticle_data['launche_provider'], lauchesValue, msg=f"new_sfnarticle_data['launche_provider'] {new_sfnarticle_data['launche_provider']}, lauchesValue {lauchesValue}")
    '''

    def test_create_record_new_route(self):
        """POST an article and verify every field of the detail response."""
        initial_sfnarticles_count = SFNArticles.objects.count()
        # print("initial_sfnarticles_count", initial_sfnarticles_count)
        new_sfnarticle_data = {
            "api_id": 0,
            "title": "ff",
            "url": "f2",
            "imageUrl": "f2",
            "newsSite": "f",
            "summary": "f",
            "updatedAt": "2021-02-13T13:34:02",
            "publishedAt": "2021-02-13T13:34:02",
            "featured": True,
            "article_launche_id": "article_launche_idarticle_launche_id",
            "article_launche_id_provider": "article_launche_id_providerarticle_launche_id_provider"
        }
        response = self.client.post('http://127.0.0.1:8000/articles/', data=new_sfnarticle_data)
        if response.status_code != 201:
            print(response.data)
        else:
            # response = self.client.get(f'http://127.0.0.1:8000/test_articles/')  # OrderedDict with count
            # Fix: inner quotes must differ from the f-string quotes (SyntaxError on Python < 3.12).
            response = self.client.get(f"http://127.0.0.1:8000/articles/{response.data['my_id']}/")  # dict
        # print("response.data", response.data)
        new_sfnarticles_count = SFNArticles.objects.count()
        self.assertEqual(initial_sfnarticles_count + 1, new_sfnarticles_count, msg=f"response.data {response.data}")
        # print("new_sfnarticles_count", new_sfnarticles_count)
        for k, v in response.data.items():
            if k != 'my_id' and k != 'articleslaunches' and k != 'articlesevents' and k != 'updatedAt' and k != 'publishedAt':
                self.assertEqual(new_sfnarticle_data[k], v, msg=f"new_sfnarticle_data[k] {new_sfnarticle_data[k]}, v {v}")
            elif k == 'updatedAt' or k == 'publishedAt':
                # print("type(v)", type(v))
                datetime_v = datetime.strptime(v, DATETIME_FORMAT)
                str_v = datetime_v.strftime(DATETIME_FORMAT)
                # print("str_v", str_v)
                self.assertEqual(new_sfnarticle_data[k], str_v, msg=f"new_sfnarticle_data[k] {new_sfnarticle_data[k]}, str_v {str_v}")
            elif k == 'articlesevents':
                # print(f'NO CORRESPONDING VALUE k {k} v {v} INSERTED')
                # Fix: branch previously contained only comments (IndentationError);
                # no events are posted, so the list must be empty.
                self.assertEqual([], v, msg=f"EXPECTED: {list()}")
            elif k == 'articleslaunches':
                for lauchesKey, lauchesValue in v[0].items():
                    if lauchesKey == 'article_launche_id':
                        self.assertEqual(new_sfnarticle_data["article_launche_id"], lauchesValue, msg=f"new_sfnarticle_data['article_launche_id'] {new_sfnarticle_data['article_launche_id']}, lauchesValue {lauchesValue}")
                    elif lauchesKey == 'provider':
                        self.assertEqual(new_sfnarticle_data['article_launche_id_provider'], lauchesValue, msg=f"new_sfnarticle_data['article_launche_id_provider'] {new_sfnarticle_data['article_launche_id_provider']}, lauchesValue {lauchesValue}")
                    elif lauchesKey == 'sfnarticles':
                        self.assertEqual(response.data['my_id'], lauchesValue, msg=f"response.data['my_id'] {response.data['my_id']}, lauchesValue {lauchesValue}")


class SFNArticlesDestroyTestCase(APITestCase):
    """Tests for DELETE /articles/<my_id>/."""

    # Old-route helper and test kept disabled, verbatim, inside a string literal.
    '''
    def create_sfnarticle_article(self):
        new_sfnarticle_data = {
            "api_id": 1,
            "title": "EMPTY",
            "url": "EMPTY",
            "imageUrl": "EMPTY",
            "newsSite": "EMPTY",
            "summary": "EMPTY",
            "updatedAt": "2022-02-13T13:34:02",
            "publishedAt": "2022-02-13T13:34:02",
            "featured": True,
            "event_id": "EMPTY",
            "event_provider": "EMPTY",
            "launche_id": "EMPTY",
            "launche_provider": "EMPTY",
        }
        response = self.client.post('http://127.0.0.1:8000/articles/', data=new_sfnarticle_data)
        if response.status_code != 200:
            print(response.data)
        return response

    def test_delete_sfnarticles(self):
        initial_sfnarticles_count = SFNArticles.objects.count()
        # print("initial_sfnarticles_count", initial_sfnarticles_count)
        response = self.create_sfnarticle_article()
        new_sfnarticles_count = SFNArticles.objects.count()
        self.assertEqual(new_sfnarticles_count - 1, initial_sfnarticles_count, msg=f"response.data {response.data}")
        # print("new_sfnarticles_count", new_sfnarticles_count)
        sfnarticles_my_id = SFNArticles.objects.first().my_id
        self.client.delete('http://127.0.0.1:8000/articles/{}/'.format(sfnarticles_my_id))
        self.assertEqual(
            SFNArticles.objects.count(),
            new_sfnarticles_count - 1
        )
        self.assertRaises(
            SFNArticles.DoesNotExist,
            SFNArticles.objects.get, my_id=sfnarticles_my_id,
        )
    '''

    def create_sfnarticle_article_new_route(self):
        """Create one article via the API and return its detail response."""
        new_sfnarticle_data = {
            "f": "ff",
            "api_id": 14024,
            "title": "ff",
            "url": "ff",
            "imageUrl": "fff",
            "newsSite": "fffff",
            "summary": "ff",
            "updatedAt": "2022-02-21T11:59:55",
            "publishedAt": "2022-02-21T11:59:50",
            "featured": True
        }
        payload = json.dumps(new_sfnarticle_data)
        response = self.client.post('http://127.0.0.1:8000/articles/', data=payload, content_type="application/json")
        if response.status_code != 201:
            print(response.data)
        else:
            # response = self.client.get(f'http://127.0.0.1:8000/articles/')  # OrderedDict with count
            # Fix: inner quotes must differ from the f-string quotes (SyntaxError on Python < 3.12).
            response = self.client.get(f"http://127.0.0.1:8000/articles/{response.data['my_id']}/")  # dict
        return response

    def test_delete_sfnarticles_new_route(self):
        """Create, then delete an article and verify it is gone."""
        initial_sfnarticles_count = SFNArticles.objects.count()
        # print("initial_sfnarticles_count", initial_sfnarticles_count)
        response = self.create_sfnarticle_article_new_route()
        new_sfnarticles_count = SFNArticles.objects.count()
        self.assertEqual(initial_sfnarticles_count + 1, new_sfnarticles_count, msg=f"response.data {response.data}")
        # print("new_sfnarticles_count", new_sfnarticles_count)
        sfnarticles_my_id = SFNArticles.objects.first().my_id
        self.client.delete('http://127.0.0.1:8000/articles/{}/'.format(sfnarticles_my_id))
        self.assertEqual(
            SFNArticles.objects.count(),
            new_sfnarticles_count - 1
        )
        self.assertRaises(
            SFNArticles.DoesNotExist,
            SFNArticles.objects.get, my_id=sfnarticles_my_id,
        )


class SFNArticlesListTestCase(APITestCase):
    """Tests for GET /articles/ (listing/pagination)."""

    # Old-route test kept disabled, verbatim, inside a string literal.
    '''
    def test_list_sfnarticles(self):
        initial_sfnarticles_count = SFNArticles.objects.count()
        # print("initial_sfnarticles_count", initial_sfnarticles_count)
        response = self.client.get('http://127.0.0.1:8000/articles/')
        # print('response.data', response.data)
        self.assertListEqual(list(), response.data['articles_data'])
        self.assertFalse(response.data['has_next'])
        self.assertFalse(response.data['has_previous'])
        self.assertEqual(response.data['current_page_number'], 1)
        self.assertEqual(response.data['num_pages'], 1)
    '''

    def test_list_sfnarticles_new_route(self):
        """An empty database yields an empty, link-less first page."""
        initial_sfnarticles_count = SFNArticles.objects.count()
        # print("initial_sfnarticles_count", initial_sfnarticles_count)
        response = self.client.get('http://127.0.0.1:8000/articles/')
        # print('response.data', response.data)
        self.assertListEqual(list(), response.data['results'])
        self.assertIsNone(response.data['previous'])
        self.assertIsNone(response.data['next'])
        self.assertEqual(response.data['count'], 0)


class SFNArticlesUpdateTestCase(APITestCase):
    """Tests for PUT /articles/<my_id>/."""

    # Old-route helper and test kept disabled, verbatim, inside a string literal.
    '''
    def create_sfnarticle_article(self):
        new_sfnarticle_data = {
            "api_id": 1,
            "title": "EMPTY",
            "url": "EMPTY",
            "imageUrl": "EMPTY",
            "newsSite": "EMPTY",
            "summary": "EMPTY",
            "updatedAt": "2022-02-13T13:34:02",
            "publishedAt": "2022-02-13T13:34:02",
            "featured": True,
            "event_id": "EMPTY",
            "event_provider": "EMPTY",
            "launche_id": "EMPTY",
            "launche_provider": "EMPTY",
        }
        response = self.client.post('http://127.0.0.1:8000/articles/', data=new_sfnarticle_data)
        if response.status_code != 200:
            print(response.data)
        return response

    def test_update_product(self):
        initial_sfnarticles_count = SFNArticles.objects.count()
        # print("initial_sfnarticles_count", initial_sfnarticles_count)
        response = self.create_sfnarticle_article()
        new_sfnarticles_count = SFNArticles.objects.count()
        self.assertEqual(new_sfnarticles_count - 1, initial_sfnarticles_count, msg=f"response.data {response.data}")
        # print("new_sfnarticles_count", new_sfnarticles_count)
        sfnarticles = SFNArticles.objects.first()
        payload = json.dumps({"title": "!!!!NOT_EMPTY!!!",
                              "launche_id": "!!!!NOT_EMPTY!!!",
                              })
        response = self.client.put(
            path='http://127.0.0.1:8000/articles/{}/'.format(sfnarticles.my_id),
            data=payload,
            content_type="application/json"
        )
        # print("response.data", response.data)
        self.assertEqual(  # field is not updated
            "EMPTY",
            SFNArticlesLaunches.objects.all().first().article_launche_id
        )
        self.assertEqual(  # field is updated
            "!!!!NOT_EMPTY!!!",
            SFNArticles.objects.all().first().title
        )
    '''

    def create_sfnarticle_article_new_route(self):
        """Create one article via the API and return its detail response."""
        new_sfnarticle_data = {
            "api_id": 0,
            "title": "ff",
            "url": "f2",
            "imageUrl": "f2",
            "newsSite": "f",
            "summary": "f",
            "updatedAt": "2021-02-13T13:34:02",
            "publishedAt": "2021-02-13T13:34:02",
            "featured": True
        }
        response = self.client.post('http://127.0.0.1:8000/articles/', data=new_sfnarticle_data)
        if response.status_code != 201:
            print(response.data)
        else:
            # response = self.client.get(f'http://127.0.0.1:8000/test_articles/')  # OrderedDict with count
            # Fix: inner quotes must differ from the f-string quotes (SyntaxError on Python < 3.12).
            response = self.client.get(f"http://127.0.0.1:8000/articles/{response.data['my_id']}/")  # dict
        return response

    def test_update_product_new_route(self):
        """PUT an updated payload and verify article + launch fields change."""
        initial_sfnarticles_count = SFNArticles.objects.count()
        # print("initial_sfnarticles_count", initial_sfnarticles_count)
        response = self.create_sfnarticle_article_new_route()
        new_sfnarticles_count = SFNArticles.objects.count()
        self.assertEqual(new_sfnarticles_count - 1, initial_sfnarticles_count, msg=f"response.data {response.data}")
        # print("new_sfnarticles_count", new_sfnarticles_count)
        sfnarticles = SFNArticles.objects.first()
        payload = response.data  # required
        payload.update({"title": "!!!!NOT_EMPTY!!!",
                        "article_launche_id": "!!!!NOT_EMPTY!!!",  # required with article_launche_id_provider
                        "article_launche_id_provider": "!!!!NOT_EMPTY!!!"  # required
                        })
        new_payload = json.dumps(payload)
        # print("type(new_payload)", type(new_payload), 'new_payload', new_payload)
        response = self.client.put(
            path='http://127.0.0.1:8000/articles/{}/'.format(sfnarticles.my_id),
            data=new_payload,
            content_type="application/json"
        )
        response = self.client.get(path='http://127.0.0.1:8000/articles/{}/'.format(sfnarticles.my_id))
        # print("response.data", response.data)
        self.assertEqual(  # field is not updated, only inserted
            "!!!!NOT_EMPTY!!!",
            SFNArticlesLaunches.objects.all().first().article_launche_id
        )
        self.assertEqual(  # field is not updated, only inserted
            "!!!!NOT_EMPTY!!!",
            SFNArticlesLaunches.objects.all().first().provider
        )
        self.assertEqual(  # field is updated
            "!!!!NOT_EMPTY!!!",
            SFNArticles.objects.all().first().title
        )
from django.test import TestCase
from rest_framework.test import APITestCase
from coodesh_app.models import SFNArticles, SFNArticlesLaunches
import json
from datetime import datetime
from coodesh_app.management.commands.load_api_data import DATETIME_FORMAT

# Create your tests here.

# Every test below exercises the "new route" articles endpoint.
ARTICLES_URL = 'http://127.0.0.1:8000/articles/'


class SFNArticlesCreateTestCase(APITestCase):
    """POST /articles/ must create one record and echo the posted fields."""

    def test_create_record_new_route(self):
        """Creating an article increments the count and round-trips data.

        The detail route is fetched after creation so the nested
        ``articleslaunches`` relation can be verified as well.
        """
        initial_sfnarticles_count = SFNArticles.objects.count()
        new_sfnarticle_data = {
            "api_id": 0,
            "title": "ff",
            "url": "f2",
            "imageUrl": "f2",
            "newsSite": "f",
            "summary": "f",
            "updatedAt": "2021-02-13T13:34:02",
            "publishedAt": "2021-02-13T13:34:02",
            "featured": True,
            "article_launche_id": "article_launche_idarticle_launche_id",
            "article_launche_id_provider":
                "article_launche_id_providerarticle_launche_id_provider",
        }
        response = self.client.post(ARTICLES_URL, data=new_sfnarticle_data)
        if response.status_code != 201:
            print(response.data)
        else:
            # Re-read through the detail route: it serializes the nested
            # articleslaunches/articlesevents relations.
            response = self.client.get(
                f'{ARTICLES_URL}{response.data["my_id"]}/')

        new_sfnarticles_count = SFNArticles.objects.count()
        self.assertEqual(initial_sfnarticles_count + 1, new_sfnarticles_count,
                         msg=f"response.data {response.data}")

        for k, v in response.data.items():
            if k not in ('my_id', 'articleslaunches', 'articlesevents',
                         'updatedAt', 'publishedAt'):
                # Plain scalar fields must round-trip unchanged.
                self.assertEqual(
                    new_sfnarticle_data[k], v,
                    msg=f"new_sfnarticle_data[k] {new_sfnarticle_data[k]}, "
                        f"v {v}")
            elif k in ('updatedAt', 'publishedAt'):
                # Normalize through DATETIME_FORMAT before comparing, since
                # the serializer may render a different textual format.
                datetime_v = datetime.strptime(v, DATETIME_FORMAT)
                str_v = datetime_v.strftime(DATETIME_FORMAT)
                self.assertEqual(
                    new_sfnarticle_data[k], str_v,
                    msg=f"new_sfnarticle_data[k] {new_sfnarticle_data[k]}, "
                        f"str_v {str_v}")
            elif k == 'articlesevents':
                # BUG FIX: this branch previously contained only comments,
                # which is a syntax error (an indented block is required
                # after the elif). No event fields are posted in this test,
                # so there is nothing to check here.
                pass
            elif k == 'articleslaunches':
                # Nested launch record built from the *_launche_* fields.
                for lauches_key, lauches_value in v[0].items():
                    if lauches_key == 'article_launche_id':
                        self.assertEqual(
                            new_sfnarticle_data["article_launche_id"],
                            lauches_value,
                            msg=f"new_sfnarticle_data['article_launche_id'] "
                                f"{new_sfnarticle_data['article_launche_id']}"
                                f", lauchesValue {lauches_value}")
                    elif lauches_key == 'provider':
                        self.assertEqual(
                            new_sfnarticle_data['article_launche_id_provider'],
                            lauches_value,
                            msg=f"new_sfnarticle_data"
                                f"['article_launche_id_provider'] "
                                f"{new_sfnarticle_data['article_launche_id_provider']}"
                                f", lauchesValue {lauches_value}")
                    elif lauches_key == 'sfnarticles':
                        # The nested launch must point back to this article.
                        self.assertEqual(
                            response.data['my_id'], lauches_value,
                            msg=f"response.data['my_id'] "
                                f"{response.data['my_id']}, "
                                f"lauchesValue {lauches_value}")


class SFNArticlesDestroyTestCase(APITestCase):
    """DELETE /articles/<id>/ must remove exactly the targeted record."""

    def create_sfnarticle_article_new_route(self):
        """Create a throwaway article and return its detail response."""
        new_sfnarticle_data = {
            "f": "ff",  # unknown field: the endpoint is expected to ignore it
            "api_id": 14024,
            "title": "ff",
            "url": "ff",
            "imageUrl": "fff",
            "newsSite": "fffff",
            "summary": "ff",
            "updatedAt": "2022-02-21T11:59:55",
            "publishedAt": "2022-02-21T11:59:50",
            "featured": True,
        }
        payload = json.dumps(new_sfnarticle_data)
        response = self.client.post(ARTICLES_URL, data=payload,
                                    content_type="application/json")
        if response.status_code != 201:
            print(response.data)
        else:
            response = self.client.get(
                f'{ARTICLES_URL}{response.data["my_id"]}/')
        return response

    def test_delete_sfnarticles_new_route(self):
        initial_sfnarticles_count = SFNArticles.objects.count()
        response = self.create_sfnarticle_article_new_route()
        new_sfnarticles_count = SFNArticles.objects.count()
        self.assertEqual(initial_sfnarticles_count + 1, new_sfnarticles_count,
                         msg=f"response.data {response.data}")

        sfnarticles_my_id = SFNArticles.objects.first().my_id
        self.client.delete(f'{ARTICLES_URL}{sfnarticles_my_id}/')
        self.assertEqual(SFNArticles.objects.count(),
                         new_sfnarticles_count - 1)
        # Fetching the deleted record must now fail.
        self.assertRaises(
            SFNArticles.DoesNotExist,
            SFNArticles.objects.get,
            my_id=sfnarticles_my_id,
        )


class SFNArticlesListTestCase(APITestCase):
    """GET /articles/ must return an empty, well-formed paginated payload."""

    def test_list_sfnarticles_new_route(self):
        response = self.client.get(ARTICLES_URL)
        # DRF-style pagination envelope on an empty database.
        self.assertListEqual([], response.data['results'])
        self.assertIsNone(response.data['previous'])
        self.assertIsNone(response.data['next'])
        self.assertEqual(response.data['count'], 0)


class SFNArticlesUpdateTestCase(APITestCase):
    """PUT /articles/<id>/ must update scalars and insert nested launches."""

    def create_sfnarticle_article_new_route(self):
        """Create a throwaway article and return its detail response."""
        new_sfnarticle_data = {
            "api_id": 0,
            "title": "ff",
            "url": "f2",
            "imageUrl": "f2",
            "newsSite": "f",
            "summary": "f",
            "updatedAt": "2021-02-13T13:34:02",
            "publishedAt": "2021-02-13T13:34:02",
            "featured": True,
        }
        response = self.client.post(ARTICLES_URL, data=new_sfnarticle_data)
        if response.status_code != 201:
            print(response.data)
        else:
            response = self.client.get(
                f'{ARTICLES_URL}{response.data["my_id"]}/')
        return response

    def test_update_product_new_route(self):
        initial_sfnarticles_count = SFNArticles.objects.count()
        response = self.create_sfnarticle_article_new_route()
        new_sfnarticles_count = SFNArticles.objects.count()
        self.assertEqual(new_sfnarticles_count - 1, initial_sfnarticles_count,
                         msg=f"response.data {response.data}")

        sfnarticles = SFNArticles.objects.first()
        # PUT requires the full representation, so start from the GET data.
        payload = response.data
        payload.update({
            "title": "!!!!NOT_EMPTY!!!",
            # article_launche_id is required together with its provider.
            "article_launche_id": "!!!!NOT_EMPTY!!!",
            "article_launche_id_provider": "!!!!NOT_EMPTY!!!",
        })
        new_payload = json.dumps(payload)
        response = self.client.put(
            path=f'{ARTICLES_URL}{sfnarticles.my_id}/',
            data=new_payload,
            content_type="application/json",
        )
        response = self.client.get(
            path=f'{ARTICLES_URL}{sfnarticles.my_id}/')

        # The launch is inserted (not updated in place) by the PUT.
        self.assertEqual(
            "!!!!NOT_EMPTY!!!",
            SFNArticlesLaunches.objects.all().first().article_launche_id)
        self.assertEqual(
            "!!!!NOT_EMPTY!!!",
            SFNArticlesLaunches.objects.all().first().provider)
        # The scalar title field IS updated in place.
        self.assertEqual(
            "!!!!NOT_EMPTY!!!",
            SFNArticles.objects.all().first().title)
# Copyright 2021 Google Research.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Generates template based utterances based on user and system actions.

A template is defined for a few categories of actions for each service. First,
each action is converted into a sentence by with the help of the corresponding
template, optionally replacing the parameters with the associated slot values.
The utterance is generated by concatenating the template based representations
of each action present in the turn.
"""

import os

VALUE_CHAR = "@"
SEPARATOR = "!!"
CONFIRM_PREFIX = "Please confirm the following details:"

# The precedence order used for sorting actions before generating a templatized
# utterance.
ACT_PREFERENCE_ORDER = ("SELECT", "INFORM_COUNT", "NOTIFY_SUCCESS",
                        "NOTIFY_FAILURE", "INFORM", "REQUEST", "CONFIRM")

# The acts for which the selected template also depends on the intent.
INTENT_CONDITIONED_ACTS = frozenset(
    ["NOTIFY_FAILURE", "NOTIFY_SUCCESS", "INFORM_COUNT", "OFFER_INTENT"])


def get_action_template(action, intent):
  """Returns a templatized representation of an action.

  For an action like INFORM(city=Napa) the return value is of the form:
  INFORM<SEPARATOR>city<SEPARATOR><VALUE_CHAR>. One occurrence of VALUE_CHAR
  indicates that the template handles the case when one value is passed.

  Args:
    action: DialogAction to be converted into a template.
    intent: The intent corresponding to the action.

  Returns:
    String template constructed as mentioned above.
  """
  parts = [action["act"]]
  if action["act"] in INTENT_CONDITIONED_ACTS:
    if intent is None:
      raise ValueError("Intent is required for an intent conditioned act.")
    # Add the intent to the template key.
    parts.append(intent)

  if action["act"] == "INFORM_COUNT":
    parts.extend(["count", VALUE_CHAR])
  elif action["slot"]:
    # Add the slot to the template key.
    parts.append(action["slot"])

  # Add placeholder for values. For boolean slots, add the actual value.
  slot_values = action["values"]
  if slot_values:
    # Check for True, False and dontcare values.
    if slot_values[0] in ["True", "False", "dontcare"]:
      if len(slot_values) != 1:
        raise ValueError("Boolean slots can't have multiple values.")
      parts.append(slot_values[0])
    else:
      parts.append(VALUE_CHAR * len(slot_values))
  return SEPARATOR.join(parts)


class TemplateUtteranceGenerator:
  """Generates template utterance for a dialogue turn."""

  def __init__(self, template_dir, use_canonical_values=False):
    self._template_dir = template_dir
    self._templates_for_service = {}
    act_pref = {v: k for k, v in enumerate(ACT_PREFERENCE_ORDER)}
    # Key function used for sorting actions based on the preference order of
    # dialogue acts.
    self._act_key_fn = lambda action: act_pref.get(action["act"], len(act_pref))
    self._use_canonical_values = use_canonical_values

  def _load_templates_for_service(self, service):
    """Load utterance templates for a service from the tsv file."""
    tsv_path = os.path.join(self._template_dir, "{}.tsv".format(service))
    if not os.path.exists(tsv_path):
      raise ValueError("Templates not defined for service: {}.".format(service))
    act_key_to_template = {}
    with open(tsv_path) as f:
      for line in f:
        act_key, template_str = line.strip().split("\t")
        # Verify that the act_key and template_str are consistent.
        if act_key.count(VALUE_CHAR) != template_str.count(VALUE_CHAR):
          raise ValueError(
              "Template not consistent. act_key: {} template: {}".format(
                  act_key, template_str))
        act_key_to_template[act_key] = template_str
    self._templates_for_service[service] = act_key_to_template

  def _get_intent(self, action, frame):
    """Returns the intent associated with an action in a frame."""
    if action["act"] == "OFFER_INTENT" and action["slot"] == "intent":
      return action["values"][0]
    return frame.get("service_call", {}).get("method", None)

  def _get_utterance_for_action(self, service, intent, action, schema=None):
    """Converts an action to an utterance and also identifies slot spans.

    Args:
      service: The service corresponding to the action.
      intent: The intent corresponding to the action.
      action: A json object containing a dialogue action.
      schema: if given API schema, do lexicalization based on the schema

    Returns:
      The robot utterance corresponding to the action.
    """
    if service not in self._templates_for_service:
      self._load_templates_for_service(service)
    act_key = get_action_template(action, intent)
    template_dict = self._templates_for_service[service]
    if act_key not in template_dict:
      raise ValueError(f"Template not defined for {act_key} for {service}.")
    template = template_dict[act_key]

    # Fill the placeholder characters in the template from action. The loop
    # iterates over a snapshot of the original template; 'offset' tracks how
    # much earlier replacements have shifted indices in the mutated string.
    value_idx = 0
    offset = 0
    for idx, char in enumerate(template):
      if char == VALUE_CHAR:
        if self._use_canonical_values:
          value = action["canonical_values"][value_idx]
        else:
          value = action["values"][value_idx]
        if schema:
          matched_slot = None
          for slot in schema["slots"]:
            if slot["name"] == action["slot"]:
              matched_slot = slot
          is_categorical = matched_slot[
              "is_categorical"] if matched_slot else True
          # BUG FIX: this f-string previously reused double quotes inside a
          # double-quoted f-string ({action["slot"]}), which is a SyntaxError
          # on every Python version before 3.12 (PEP 701).
          replacement = value if is_categorical else f"<{action['slot']}>"
        else:
          replacement = value
        value_idx += 1
      else:
        continue
      char_ind = idx + offset
      template = template[:char_ind] + replacement + template[char_ind + 1:]
      offset += len(replacement) - 1
    return template

  def get_delexicalized_utterance(self, turn, schema=None):
    """Delexicalize target utterances.

    Delexicalize the turn utterance based on given service schema, now only
    non-categorical slots would be delexicalized. For example, the utterance
    'The restaurant is PizzaHut.' will be converted into
    'The restaurant is <reataurant_name>.'

    Args:
      turn: turn object, containing utterance, action, slots information
      schema: SGD service schema, indicating if the slot is categorical

    Returns:
      delexicalized utterance.
    """
    delexicalized_utterance = turn["utterance"]
    for frame in turn["frames"]:
      for action in sorted(frame["actions"], key=self._act_key_fn):
        for value in action["values"]:
          matched_slot = None
          for slot in schema["slots"]:
            if slot["name"] == action["slot"]:
              matched_slot = slot
          is_categorical = matched_slot[
              "is_categorical"] if matched_slot else True
          # BUG FIX: same quote-reuse SyntaxError as in
          # _get_utterance_for_action, fixed the same way.
          replacement = value if is_categorical else f"<{action['slot']}>"
          delexicalized_utterance = delexicalized_utterance.replace(
              value, replacement)
    return delexicalized_utterance

  def get_robot_utterance(self, turn, schema):
    """Get the robot utterance corresponding to a turn."""
    # Use templates to generate an utterance for each action. All utterances are
    # then concatenated to give the resulting system utterance.
    utterances = []
    for frame in turn["frames"]:
      # A trick here to make the CONFIRM action's utterance natural: Add
      # "Please confirm the following details:" in the front.
      all_acts = {action["act"] for action in frame["actions"]}
      if "CONFIRM" in all_acts:
        utterances.append(CONFIRM_PREFIX)
      for action in sorted(frame["actions"], key=self._act_key_fn):
        # Get the active intent corresponding to this action.
        intent = self._get_intent(action, frame)
        utterance = self._get_utterance_for_action(frame["service"], intent,
                                                   action, schema)
        utterances.append(utterance)
    return " ".join(utterances)
# Copyright 2021 Google Research.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Generates template based utterances based on user and system actions.

A template is defined for a few categories of actions for each service. First,
each action is converted into a sentence by with the help of the corresponding
template, optionally replacing the parameters with the associated slot values.
The utterance is generated by concatenating the template based representations
of each action present in the turn.
"""

import os

# Placeholder character substituted with a slot value when rendering.
VALUE_CHAR = "@"
# Joins the parts (act, intent, slot, placeholders) of a template key.
SEPARATOR = "!!"
CONFIRM_PREFIX = "Please confirm the following details:"

# The precedence order used for sorting actions before generating a templatized
# utterance.
ACT_PREFERENCE_ORDER = ("SELECT", "INFORM_COUNT", "NOTIFY_SUCCESS",
                        "NOTIFY_FAILURE", "INFORM", "REQUEST", "CONFIRM")

# The acts for which the selected template also depends on the intent.
INTENT_CONDITIONED_ACTS = frozenset(
    ["NOTIFY_FAILURE", "NOTIFY_SUCCESS", "INFORM_COUNT", "OFFER_INTENT"])


def get_action_template(action, intent):
  """Returns a templatized representation of an action.

  For an action like INFORM(city=Napa) the return value is of the form:
  INFORM<SEPARATOR>city<SEPARATOR><VALUE_CHAR>. One occurrence of VALUE_CHAR
  indicates that the template handles the case when one value is passed.

  Args:
    action: DialogAction to be converted into a template.
    intent: The intent corresponding to the action.

  Returns:
    String template constructed as mentioned above.

  Raises:
    ValueError: if intent is None for an intent-conditioned act, or a
      boolean-valued slot carries more than one value.
  """
  parts = [action["act"]]
  if action["act"] in INTENT_CONDITIONED_ACTS:
    if intent is None:
      raise ValueError("Intent is required for an intent conditioned act.")
    # Add the intent to the template key.
    parts.append(intent)

  if action["act"] == "INFORM_COUNT":
    parts.extend(["count", VALUE_CHAR])
  elif action["slot"]:
    # Add the slot to the template key.
    parts.append(action["slot"])

  # Add placeholder for values. For boolean slots, add the actual value.
  slot_values = action["values"]
  if slot_values:
    # Check for True, False and dontcare values.
    if slot_values[0] in ["True", "False", "dontcare"]:
      if len(slot_values) != 1:
        raise ValueError("Boolean slots can't have multiple values.")
      parts.append(slot_values[0])
    else:
      # One VALUE_CHAR per value, e.g. two values -> "@@".
      parts.append(VALUE_CHAR * len(slot_values))
  return SEPARATOR.join(parts)


class TemplateUtteranceGenerator:
  """Generates template utterance for a dialogue turn."""

  def __init__(self, template_dir, use_canonical_values=False):
    # Directory holding one "<service>.tsv" template file per service;
    # files are loaded lazily on first use of each service.
    self._template_dir = template_dir
    self._templates_for_service = {}
    act_pref = {v: k for k, v in enumerate(ACT_PREFERENCE_ORDER)}
    # Key function used for sorting actions based on the preference order of
    # dialogue acts. Acts not listed sort last (key == len(act_pref)).
    self._act_key_fn = lambda action: act_pref.get(action["act"], len(act_pref))
    self._use_canonical_values = use_canonical_values

  def _load_templates_for_service(self, service):
    """Load utterance templates for a service from the tsv file."""
    tsv_path = os.path.join(self._template_dir, "{}.tsv".format(service))
    if not os.path.exists(tsv_path):
      raise ValueError("Templates not defined for service: {}.".format(service))
    act_key_to_template = {}
    with open(tsv_path) as f:
      for line in f:
        # Each line is "<act_key>\t<template_str>".
        act_key, template_str = line.strip().split("\t")
        # Verify that the act_key and template_str are consistent.
        if act_key.count(VALUE_CHAR) != template_str.count(VALUE_CHAR):
          raise ValueError(
              "Template not consistent. act_key: {} template: {}".format(
                  act_key, template_str))
        act_key_to_template[act_key] = template_str
    self._templates_for_service[service] = act_key_to_template

  def _get_intent(self, action, frame):
    # OFFER_INTENT carries the intent as its single value; otherwise fall
    # back to the frame's service_call method (None when absent).
    if action["act"] == "OFFER_INTENT" and action["slot"] == "intent":
      return action["values"][0]
    return frame.get("service_call", {}).get("method", None)

  def _get_utterance_for_action(self, service, intent, action, schema=None):
    """Converts an action to an utterance and also identifies slot spans.

    Args:
      service: The service corresponding to the action.
      intent: The intent corresponding to the action.
      action: A json object containing a dialogue action.
      schema: if given API schema, do lexicalization based on the schema

    Returns:
      The robot utterance corresponding to the action.
    """
    if service not in self._templates_for_service:
      self._load_templates_for_service(service)
    act_key = get_action_template(action, intent)
    template_dict = self._templates_for_service[service]
    if act_key not in template_dict:
      raise ValueError(f"Template not defined for {act_key} for {service}.")
    template = template_dict[act_key]

    # Fill the placeholder characters in the template from action.
    # NOTE: enumerate() iterates a snapshot of the original template string;
    # reassigning 'template' below does not affect the iteration. 'offset'
    # tracks how much earlier replacements shifted indices in the new string.
    # Assumes action["values"] (or "canonical_values") has one entry per
    # VALUE_CHAR — guaranteed by the consistency check at load time.
    value_idx = 0
    offset = 0
    for idx, char in enumerate(template):
      if char == VALUE_CHAR:
        if self._use_canonical_values:
          value = action["canonical_values"][value_idx]
        else:
          value = action["values"][value_idx]
        if schema:
          # Find the schema entry for this slot; if several share the name,
          # the last match wins. Unknown slots are treated as categorical.
          matched_slot = None
          for slot in schema["slots"]:
            if slot["name"] == action["slot"]:
              matched_slot = slot
          is_categorical = matched_slot[
              "is_categorical"] if matched_slot else True
          replacement = value if is_categorical else f"<{action['slot']}>"
        else:
          replacement = value
        value_idx += 1
      else:
        continue
      char_ind = idx + offset
      template = template[:char_ind] + replacement + template[char_ind + 1:]
      offset += len(replacement) - 1
    return template

  def get_delexicalized_utterance(self, turn, schema=None):
    """Delexicalize target utterances.

    Delexicalize the turn utterance based on given service schema, now only
    non-categorical slots would be delexicalized. For example, the utterance
    'The restaurant is PizzaHut.' will be converted into
    'The restaurant is <reataurant_name>.'

    Args:
      turn: turn object, containing utterance, action, slots information
      schema: SGD service schema, indicating if the slot is categorical

    Returns:
      delexicalized utterance.

    NOTE(review): despite the schema=None default, schema["slots"] is
    subscripted unconditionally below, so passing None raises TypeError —
    confirm intended contract with callers.
    """
    delexicalized_utterance = turn["utterance"]
    for frame in turn["frames"]:
      for action in sorted(frame["actions"], key=self._act_key_fn):
        for value in action["values"]:
          matched_slot = None
          for slot in schema["slots"]:
            if slot["name"] == action["slot"]:
              matched_slot = slot
          is_categorical = matched_slot[
              "is_categorical"] if matched_slot else True
          replacement = value if is_categorical else f"<{action['slot']}>"
          # str.replace substitutes EVERY occurrence of the value in the
          # utterance, not only the span produced by this action.
          delexicalized_utterance = delexicalized_utterance.replace(
              value, replacement)
    return delexicalized_utterance

  def get_robot_utterance(self, turn, schema):
    """Get the robot utterance corresponding to a turn."""
    # Use templates to generate an utterance for each action. All utterances are
    # then concatenated to give the resulting system utterance.
    utterances = []
    for frame in turn["frames"]:
      # A trick here to make the CONFIRM action's utterance natural: Add
      # "Please confirm the following details:" in the front.
      all_acts = {action["act"] for action in frame["actions"]}
      if "CONFIRM" in all_acts:
        utterances.append(CONFIRM_PREFIX)
      for action in sorted(frame["actions"], key=self._act_key_fn):
        # Get the active intent corresponding to this action.
        intent = self._get_intent(action, frame)
        utterance = self._get_utterance_for_action(frame["service"], intent,
                                                   action, schema)
        utterances.append(utterance)
    return " ".join(utterances)
# # This source file is part of the EdgeDB open source project. # # Copyright 2019-present MagicStack Inc. and the EdgeDB authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import annotations import dataclasses import json from typing import * import immutables from edb import errors from edb.common import enum from edb.common import typeutils from edb.edgeql import qltypes from edb.schema import objects as s_obj from . import spec from . import types if TYPE_CHECKING: Mapping_T = TypeVar("Mapping_T", bound=Mapping[str, str]) class OpLevel(enum.StrEnum): SESSION = 'SESSION' SYSTEM = 'SYSTEM' class OpCode(enum.StrEnum): CONFIG_ADD = 'ADD' CONFIG_REM = 'REM' CONFIG_SET = 'SET' CONFIG_RESET = 'RESET' class Operation(NamedTuple): opcode: OpCode level: OpLevel setting_name: str value: Union[str, int, bool, None] def get_setting(self, spec: spec.Spec): try: return spec[self.setting_name] except KeyError: raise errors.ConfigurationError( f'unknown setting {self.setting_name!r}') from None def coerce_value(self, setting: spec.Setting, *, allow_missing: bool = False): if issubclass(setting.type, types.ConfigType): try: return setting.type.from_pyvalue( self.value, allow_missing=allow_missing) except (ValueError, TypeError): raise errors.ConfigurationError( f'invalid value type for the {setting.name!r} setting') elif setting.set_of: if self.value is None and allow_missing: return None elif not typeutils.is_container(self.value): raise errors.ConfigurationError( 
f'invalid value type for the ' f'{setting.name!r} setting') else: for v in self.value: if not isinstance(v, setting.type): raise errors.ConfigurationError( f'invalid value type for the ' f'{setting.name!r} setting') return frozenset(self.value) else: if isinstance(self.value, setting.type): return self.value elif self.value is None and allow_missing: return None else: raise errors.ConfigurationError( f'invalid value type for the {setting.name!r} setting') def apply(self, spec: spec.Spec, storage: Mapping_T) -> Mapping_T: setting = self.get_setting(spec) allow_missing = ( self.opcode is OpCode.CONFIG_REM or self.opcode is OpCode.CONFIG_RESET ) value = self.coerce_value(setting, allow_missing=allow_missing) if self.opcode is OpCode.CONFIG_SET: if issubclass(setting.type, types.ConfigType): raise errors.InternalServerError( f'unexpected CONFIGURE SET on a non-primitive ' f'configuration parameter: {self.setting_name}' ) storage = storage.set(self.setting_name, value) elif self.opcode is OpCode.CONFIG_RESET: if issubclass(setting.type, types.ConfigType): raise errors.InternalServerError( f'unexpected CONFIGURE RESET on a non-primitive ' f'configuration parameter: {self.setting_name}' ) try: storage = storage.delete(self.setting_name) except KeyError: pass elif self.opcode is OpCode.CONFIG_ADD: if not issubclass(setting.type, types.ConfigType): raise errors.InternalServerError( f'unexpected CONFIGURE SET += on a primitive ' f'configuration parameter: {self.setting_name}' ) exist_value = storage.get(self.setting_name, setting.default) if value in exist_value: props = [] for f in dataclasses.fields(setting.type): if f.compare: props.append(f.name) if len(props) > 1: props = f' ({', '.join(props)}) violate' else: props = f'.{props[0]} violates' raise errors.ConstraintViolationError( f'{setting.type.__name__}{props} ' f'exclusivity constriant' ) new_value = exist_value | {value} storage = storage.set(self.setting_name, new_value) elif self.opcode is OpCode.CONFIG_REM: if 
not issubclass(setting.type, types.ConfigType):
                # CONFIGURE SET -= only makes sense for composite
                # (ConfigType) settings; primitives use plain SET/RESET.
                raise errors.InternalServerError(
                    f'unexpected CONFIGURE SET -= on a primitive '
                    f'configuration parameter: {self.setting_name}'
                )

            # Remove *value* from the current set (falling back to the
            # spec default when the setting was never stored).
            exist_value = storage.get(self.setting_name, setting.default)
            new_value = exist_value - {value}
            storage = storage.set(self.setting_name, new_value)

        return storage

    @classmethod
    def from_json(cls, json_value: str) -> Operation:
        """Deserialize an Operation from its JSON list form.

        The JSON payload is a 4-element list:
        [opcode, level, setting_name, value].
        """
        op_str, lev_str, name, value = json.loads(json_value)
        return Operation(OpCode(op_str), OpLevel(lev_str), name, value)


def spec_to_json(spec: spec.Spec):
    """Serialize a config spec into a JSON object keyed by setting name."""
    dct = {}
    for setting in spec.values():
        # Map the Python-level setting type to a known EdgeDB type id.
        # NOTE: bool is checked before int on purpose — bool is a
        # subclass of int in Python.
        if issubclass(setting.type, str):
            typeid = s_obj.get_known_type_id('std::str')
        elif issubclass(setting.type, bool):
            typeid = s_obj.get_known_type_id('std::bool')
        elif issubclass(setting.type, int):
            typeid = s_obj.get_known_type_id('std::int64')
        elif issubclass(setting.type, types.ConfigType):
            typeid = setting.type.get_edgeql_typeid()
        else:
            raise RuntimeError(
                f'cannot serialize type for config setting {setting.name}')

        typemod = qltypes.TypeModifier.SINGLETON
        if setting.set_of:
            typemod = qltypes.TypeModifier.SET_OF

        dct[setting.name] = {
            'default': value_to_json_value(setting, setting.default),
            'internal': setting.internal,
            'system': setting.system,
            'typeid': str(typeid),
            'typemod': str(typemod),
        }

    return json.dumps(dct)


def value_to_json_value(setting: spec.Setting, value: Any):
    """Convert a stored config value to a JSON-serializable form."""
    if setting.set_of:
        if issubclass(setting.type, types.ConfigType):
            return [v.to_json_value() for v in value]
        else:
            return list(value)
    else:
        if issubclass(setting.type, types.ConfigType):
            return value.to_json_value()
        else:
            return value


def value_from_json_value(setting: spec.Setting, value: Any):
    """Inverse of value_to_json_value: rebuild the in-memory value.

    SET OF settings come back as frozensets so they are hashable and
    immutable, matching how apply() manipulates them.
    """
    if setting.set_of:
        if issubclass(setting.type, types.ConfigType):
            return frozenset(setting.type.from_json_value(v) for v in value)
        else:
            return frozenset(value)
    else:
        if issubclass(setting.type, types.ConfigType):
            return setting.type.from_json_value(value)
        else:
            return value


def value_from_json(setting, value: str):
    # Convenience wrapper: parse the JSON text, then convert.
    return value_from_json_value(setting, json.loads(value))


def to_json(spec: spec.Spec, storage: Mapping) -> str:
    """Serialize an entire config storage mapping to JSON text."""
    dct = {}
    for name, value in storage.items():
        setting = spec[name]
        dct[name] = value_to_json_value(setting, value)
    return json.dumps(dct)


def from_json(spec: spec.Spec, js: str) -> Mapping:
    """Parse JSON text produced by to_json() into an immutables.Map.

    Raises ConfigurationError for malformed JSON structure or for
    setting names not present in *spec*.
    """
    with immutables.Map().mutate() as mm:
        dct = json.loads(js)

        if not isinstance(dct, dict):
            raise errors.ConfigurationError(
                'invalid JSON: top-level dict was expected')

        for key, value in dct.items():
            setting = spec.get(key)
            if setting is None:
                raise errors.ConfigurationError(
                    f'invalid JSON: unknown setting name {key!r}')

            mm[key] = value_from_json_value(setting, value)

    return mm.finish()


def lookup(spec: spec.Spec, name: str, *configs: Mapping,
           allow_unrecognized: bool = False):
    """Look *name* up in each config mapping in order.

    Returns the first hit, the spec default if no config has it, or
    None when the name is unknown and allow_unrecognized is set.
    """
    try:
        setting = spec[name]
    except (KeyError, TypeError):
        if allow_unrecognized:
            return None
        else:
            raise errors.ConfigurationError(
                f'unrecognized configuration parameter {name!r}')

    for c in configs:
        try:
            return c[name]
        except KeyError:
            pass

    return setting.default
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""Config operation objects and (de)serialization helpers.

An Operation describes a single CONFIGURE command (SET / RESET / += / -=)
and knows how to apply itself to an immutable config-storage mapping.
"""

from __future__ import annotations

import dataclasses
import json
from typing import *

import immutables

from edb import errors
from edb.common import enum
from edb.common import typeutils
from edb.edgeql import qltypes
from edb.schema import objects as s_obj

from . import spec
from . import types


if TYPE_CHECKING:
    Mapping_T = TypeVar("Mapping_T", bound=Mapping[str, str])


class OpLevel(enum.StrEnum):
    # Scope at which a CONFIGURE command takes effect.
    SESSION = 'SESSION'
    SYSTEM = 'SYSTEM'


class OpCode(enum.StrEnum):
    # CONFIGURE SET +=
    CONFIG_ADD = 'ADD'
    # CONFIGURE SET -=
    CONFIG_REM = 'REM'
    CONFIG_SET = 'SET'
    CONFIG_RESET = 'RESET'


class Operation(NamedTuple):

    opcode: OpCode
    level: OpLevel
    setting_name: str
    value: Union[str, int, bool, None]

    def get_setting(self, spec: spec.Spec):
        """Resolve this operation's setting in *spec* or raise."""
        try:
            return spec[self.setting_name]
        except KeyError:
            raise errors.ConfigurationError(
                f'unknown setting {self.setting_name!r}') from None

    def coerce_value(self, setting: spec.Setting, *,
                     allow_missing: bool = False):
        """Validate self.value against *setting* and normalize it.

        SET OF settings are normalized to frozensets; composite settings
        are converted via their ConfigType.from_pyvalue hook.  When
        allow_missing is true (RESET / -=), a None value is accepted.
        """
        if issubclass(setting.type, types.ConfigType):
            try:
                return setting.type.from_pyvalue(
                    self.value, allow_missing=allow_missing)
            except (ValueError, TypeError):
                raise errors.ConfigurationError(
                    f'invalid value type for the {setting.name!r} setting')
        elif setting.set_of:
            if self.value is None and allow_missing:
                return None
            elif not typeutils.is_container(self.value):
                raise errors.ConfigurationError(
                    f'invalid value type for the '
                    f'{setting.name!r} setting')
            else:
                # Every element of a SET OF value must match the
                # declared element type.
                for v in self.value:
                    if not isinstance(v, setting.type):
                        raise errors.ConfigurationError(
                            f'invalid value type for the '
                            f'{setting.name!r} setting')

                return frozenset(self.value)
        else:
            if isinstance(self.value, setting.type):
                return self.value
            elif self.value is None and allow_missing:
                return None
            else:
                raise errors.ConfigurationError(
                    f'invalid value type for the {setting.name!r} setting')

    def apply(self, spec: spec.Spec, storage: Mapping_T) -> Mapping_T:
        """Return a new storage mapping with this operation applied.

        *storage* is an immutable mapping (immutables.Map-like: .get /
        .set / .delete); the original is never mutated.
        """
        setting = self.get_setting(spec)
        # RESET and -= may legitimately carry no value.
        allow_missing = (
            self.opcode is OpCode.CONFIG_REM
            or self.opcode is OpCode.CONFIG_RESET
        )

        value = self.coerce_value(setting, allow_missing=allow_missing)

        if self.opcode is OpCode.CONFIG_SET:
            if issubclass(setting.type, types.ConfigType):
                raise errors.InternalServerError(
                    f'unexpected CONFIGURE SET on a non-primitive '
                    f'configuration parameter: {self.setting_name}'
                )

            storage = storage.set(self.setting_name, value)

        elif self.opcode is OpCode.CONFIG_RESET:
            if issubclass(setting.type, types.ConfigType):
                raise errors.InternalServerError(
                    f'unexpected CONFIGURE RESET on a non-primitive '
                    f'configuration parameter: {self.setting_name}'
                )

            try:
                storage = storage.delete(self.setting_name)
            except KeyError:
                # RESET of a setting that was never stored is a no-op.
                pass

        elif self.opcode is OpCode.CONFIG_ADD:
            # CONFIGURE SET += — only valid for composite settings.
            if not issubclass(setting.type, types.ConfigType):
                raise errors.InternalServerError(
                    f'unexpected CONFIGURE SET += on a primitive '
                    f'configuration parameter: {self.setting_name}'
                )

            exist_value = storage.get(self.setting_name, setting.default)
            if value in exist_value:
                # Report which comparison fields make the new entry a
                # duplicate of an existing one.
                props = []
                for f in dataclasses.fields(setting.type):
                    if f.compare:
                        props.append(f.name)

                if len(props) > 1:
                    props = f' ({", ".join(props)}) violate'
                else:
                    props = f'.{props[0]} violates'

                # BUGFIX: the message previously read "constriant".
                raise errors.ConstraintViolationError(
                    f'{setting.type.__name__}{props} '
                    f'exclusivity constraint'
                )

            new_value = exist_value | {value}
            storage = storage.set(self.setting_name, new_value)

        elif self.opcode is OpCode.CONFIG_REM:
            # CONFIGURE SET -= — only valid for composite settings.
            if not issubclass(setting.type, types.ConfigType):
                raise errors.InternalServerError(
                    f'unexpected CONFIGURE SET -= on a primitive '
                    f'configuration parameter: {self.setting_name}'
                )

            exist_value = storage.get(self.setting_name, setting.default)
            new_value = exist_value - {value}
            storage = storage.set(self.setting_name, new_value)

        return storage

    @classmethod
    def from_json(cls, json_value: str) -> Operation:
        """Deserialize an Operation from its JSON 4-element list form:
        [opcode, level, setting_name, value]."""
        op_str, lev_str, name, value = json.loads(json_value)
        return Operation(OpCode(op_str), OpLevel(lev_str), name, value)


def spec_to_json(spec: spec.Spec):
    """Serialize a config spec into a JSON object keyed by setting name."""
    dct = {}

    for setting in spec.values():
        # NOTE: bool must be tested before int — bool subclasses int.
        if issubclass(setting.type, str):
            typeid = s_obj.get_known_type_id('std::str')
        elif issubclass(setting.type, bool):
            typeid = s_obj.get_known_type_id('std::bool')
        elif issubclass(setting.type, int):
            typeid = s_obj.get_known_type_id('std::int64')
        elif issubclass(setting.type, types.ConfigType):
            typeid = setting.type.get_edgeql_typeid()
        else:
            raise RuntimeError(
                f'cannot serialize type for config setting {setting.name}')

        typemod = qltypes.TypeModifier.SINGLETON
        if setting.set_of:
            typemod = qltypes.TypeModifier.SET_OF

        dct[setting.name] = {
            'default': value_to_json_value(setting, setting.default),
            'internal': setting.internal,
            'system': setting.system,
            'typeid': str(typeid),
            'typemod': str(typemod),
        }

    return json.dumps(dct)


def value_to_json_value(setting: spec.Setting, value: Any):
    """Convert a stored config value into a JSON-serializable form."""
    if setting.set_of:
        if issubclass(setting.type, types.ConfigType):
            return [v.to_json_value() for v in value]
        else:
            return list(value)
    else:
        if issubclass(setting.type, types.ConfigType):
            return value.to_json_value()
        else:
            return value


def value_from_json_value(setting: spec.Setting, value: Any):
    """Inverse of value_to_json_value; SET OF values become frozensets."""
    if setting.set_of:
        if issubclass(setting.type, types.ConfigType):
            return frozenset(setting.type.from_json_value(v) for v in value)
        else:
            return frozenset(value)
    else:
        if issubclass(setting.type, types.ConfigType):
            return setting.type.from_json_value(value)
        else:
            return value


def value_from_json(setting, value: str):
    """Parse JSON text, then convert it to the in-memory value form."""
    return value_from_json_value(setting, json.loads(value))


def to_json(spec: spec.Spec, storage: Mapping) -> str:
    """Serialize an entire config storage mapping to JSON text."""
    dct = {}
    for name, value in storage.items():
        setting = spec[name]
        dct[name] = value_to_json_value(setting, value)
    return json.dumps(dct)


def from_json(spec: spec.Spec, js: str) -> Mapping:
    """Parse JSON produced by to_json() into an immutables.Map.

    Raises ConfigurationError for a non-dict top level or for setting
    names that are not declared in *spec*.
    """
    with immutables.Map().mutate() as mm:
        dct = json.loads(js)

        if not isinstance(dct, dict):
            raise errors.ConfigurationError(
                'invalid JSON: top-level dict was expected')

        for key, value in dct.items():
            setting = spec.get(key)
            if setting is None:
                raise errors.ConfigurationError(
                    f'invalid JSON: unknown setting name {key!r}')

            mm[key] = value_from_json_value(setting, value)

    return mm.finish()


def lookup(spec: spec.Spec, name: str, *configs: Mapping,
           allow_unrecognized: bool = False):
    """Look *name* up in each mapping in order; fall back to the default.

    Returns None for unknown names when allow_unrecognized is true,
    otherwise raises ConfigurationError.
    """
    try:
        setting = spec[name]
    except (KeyError, TypeError):
        if allow_unrecognized:
            return None
        else:
            raise errors.ConfigurationError(
                f'unrecognized configuration parameter {name!r}')

    for c in configs:
        try:
            return c[name]
        except KeyError:
            pass

    return setting.default
""" This file (test_admin.py) contains the functional tests for the `admin` blueprint. These tests use GETs and POSTs to different endpoints to check for the proper behavior of the `admin` blueprint. """ import os import json import pytest from flask import request from flask import url_for from modules.box__default.admin.models import Role from modules.box__default.admin.models import User from modules.box__default.admin.models import role_user_link dirpath = os.path.dirname(os.path.abspath(__file__)) module_path = os.path.dirname(dirpath) module_info = None with open(os.path.join(module_path, "info.json")) as f: module_info = json.load(f) class TestAdminInvalidAuth: """ Test all admin routes for correct user authentication """ routes_get = [ "/", "/add", "/delete/<id>", "/edit/<id>", "/roles", "/roles/<role_id>/delete", ] routes_post = ["/update", "/roles/update", "/roles/add", "/add"] @pytest.mark.parametrize("route", routes_get) def test_redirect_if_not_logged_in_get(self, test_client, route, auth): auth.logout() response = test_client.get( f"{module_info["url_prefix"]}{route}", follow_redirects=True ) assert response.status_code == 200 assert request.path == url_for("auth.login") @pytest.mark.parametrize("route", routes_post) def test_redirect_if_not_logged_in_post(self, test_client, route, auth): auth.logout() response = test_client.post( f"{module_info["url_prefix"]}{route}", follow_redirects=True ) assert response.status_code == 200 assert request.path == url_for("auth.login") @pytest.mark.usefixtures("login_non_admin_user") @pytest.mark.parametrize("route", routes_get) def test_no_admin_access_if_not_admin_get(self, test_client, route): response = test_client.get( f"{module_info["url_prefix"]}{route}", follow_redirects=True ) assert response.status_code == 200 assert request.path == url_for("dashboard.index") assert b"You need to be an admin to view this page" in response.data @pytest.mark.usefixtures("login_non_admin_user") 
@pytest.mark.parametrize("route", routes_post) def test_no_admin_access_if_not_admin_post(self, test_client, route): response = test_client.post( f"{module_info["url_prefix"]}{route}", follow_redirects=True ) assert response.status_code == 200 assert request.path == url_for("dashboard.index") assert b"You need to be an admin to view this page" in response.data @pytest.mark.usefixtures("login_admin_user") class TestAdminAPI: """ Test all admin api post and get requests """ def test_admin_user_list_get(self, test_client): response = test_client.get(f"{module_info["url_prefix"]}/") assert response.status_code == 200 assert b"Admin" in response.data assert b"id" in response.data assert b"Email" in response.data assert b"Password" in response.data assert b"Roles" in response.data def test_admin_add_get(self, test_client): response = test_client.get(f"{module_info["url_prefix"]}/add") assert response.status_code == 200 assert b"Email" in response.data assert b"Password" in response.data assert b"First Name" in response.data assert b"Last Name" in response.data assert b"Admin User" in response.data def test_admin_add_unique_user_post(self, test_client): role1 = Role.create(name="test1-role") role2 = Role.create(name="test2-role") data = { "email": "test@gmail.com", "password": "pass", "first_name": "Test", "last_name": "User", "is_admin": "", f"role_{role1.id}": "", f"role_{role2.id}": "", } test_client.post( f"{module_info["url_prefix"]}/add", data=data, follow_redirects=True, ) test_user = User.query.filter(User.email == "test@gmail.com").scalar() assert test_user is not None assert test_user.first_name == "Test" assert test_user.last_name == "User" assert test_user.is_admin is False assert test_user.roles is not None assert len(test_user.roles) == 2 assert role1.users[0].email == "test@gmail.com" assert role2.users[0].email == "test@gmail.com" def test_admin_add_existing_user_post(self, test_client): User.create(email="test@gmail.com", password="pass") data = { 
"email": "test@gmail.com", "password": "pass", "first_name": "Test", "last_name": "User", "is_admin": "", } response = test_client.post( f"{module_info["url_prefix"]}/add", data=data, follow_redirects=True, ) test_users = User.query.filter(User.email == "test@gmail.com").count() assert response.status_code == 200 assert b"User with same email already exists" in response.data assert test_users == 1 def test_admin_delete_existing_user_get(self, test_client): user = User(email="test@gmail.com", password="pass") role1 = Role(name="test1-role") role2 = Role(name="test2-role") user.roles = [role1, role2] user.save() response = test_client.get( f"{module_info["url_prefix"]}/delete/{user.id}", follow_redirects=True, ) test_user = User.query.filter(User.email == user.email).scalar() test_roles = Role.query.count() user_role = ( User.query.join(role_user_link) .join(Role) .filter(User.id == user.id) .scalar() ) assert response.status_code == 200 assert test_user is None assert user_role is None assert test_roles == 2 def test_admin_delete_nonexisting_user_get(self, test_client): response = test_client.get( f"{module_info["url_prefix"]}/delete/some_id", follow_redirects=True, ) assert response.status_code == 200 assert b"Unable to delete. 
Invalid user id" in response.data def test_admin_edit_existing_user_get(self, test_client): user = User.create(email="test@gmail.com", password="pass") response = test_client.get( f"{module_info["url_prefix"]}/edit/{user.id}", ) assert response.status_code == 200 assert b"test@gmail.com" in response.data assert b"Edit User" in response.data def test_admin_edit_nonexisting_user_get(self, test_client): response = test_client.get( f"{module_info["url_prefix"]}/edit/some-id", follow_redirects=True ) assert response.status_code == 200 assert b"Invalid user id" in response.data assert request.path == f"{module_info["url_prefix"]}/" def test_admin_update_user_adding_new_roles_to_user(self, test_client): user = User.create(email="foo@gmail.com", password="pass") role1 = Role.create(name="test1-role") role2 = Role.create(name="test2-role") data = { "id": str(user.id), "email": "bar@gmail.com", "password": "newpass", "first_name": "Test", "last_name": "User", "is_admin": "", f"role_{role1.id}": "", f"role_{role2.id}": "", } response = test_client.post( f"{module_info["url_prefix"]}/update", data=data, follow_redirects=True, ) assert response.status_code == 200 assert user.email == "bar@gmail.com" assert user.check_hash("newpass") assert user.first_name == "Test" assert user.last_name == "User" assert len(user.roles) == 2 assert role1.users[0].email == "bar@gmail.com" assert role2.users[0].email == "bar@gmail.com" def test_admin_update_user_remove_old_roles_from_user(self, test_client): user = User(email="foo@gmail.com", password="pass", is_admin=True) user.set_hash("pass") user.is_admin = True role1 = Role(name="test1-role") role2 = Role(name="test2-role") user.roles = [role1, role2] user.save() data = { "id": str(user.id), "email": "bar@gmail.com", "first_name": "Test", "last_name": "User", "password": " ", "is_admin": None, } response = test_client.post( f"{module_info["url_prefix"]}/update", data=data, follow_redirects=True, ) assert response.status_code == 200 assert 
user.email == "bar@gmail.com" assert user.check_hash("pass") assert len(user.roles) == 0 assert len(role1.users) == 0 assert len(role2.users) == 0 def test_admin_roles_get(self, test_client): response = test_client.get(f"{module_info["url_prefix"]}/roles") assert response.status_code == 200 assert b"Roles" in response.data def test_admin_roles_add_nonexisting_role_post(self, test_client): response = test_client.post( f"{module_info["url_prefix"]}/roles/add", data=dict(name="new-role"), follow_redirects=True, ) role = Role.query.filter(Role.name == "new-role").scalar() role_count = Role.query.count() assert response.status_code == 200 assert role is not None assert role_count == 1 def test_admin_roles_add_existing_role_post(self, test_client): Role.create(name="new-role") response = test_client.post( f"{module_info["url_prefix"]}/roles/add", data=dict(name="new-role"), follow_redirects=True, ) role_count = Role.query.count() role = Role.query.filter(Role.name == "new-role").scalar() assert response.status_code == 200 assert b"Role already exists" in response.data assert role is not None assert role_count == 1 def test_admin_roles_delete_nonexisting_role_get(self, test_client): response = test_client.get( f"{module_info["url_prefix"]}/roles/some-id/delete", follow_redirects=True, ) assert response.status_code == 200 assert request.path == f"{module_info["url_prefix"]}/roles" assert b"Unable to delete. 
Invalid role id" in response.data def test_admin_roles_delete_existing_role_get(self, test_client): role1 = Role.create(name="new-role1") role2 = Role.create(name="new-role2") response = test_client.get( f"{module_info["url_prefix"]}/roles/{role1.id}/delete", follow_redirects=True, ) roles = Role.query.all() assert response.status_code == 200 assert request.path == f"{module_info["url_prefix"]}/roles" assert b"Role successfully deleted" in response.data assert roles is not None assert roles[0].name == role2.name assert len(roles) == 1 def test_admin_roles_update_nonexisting_role_post(self, test_client): response = test_client.post( f"{module_info["url_prefix"]}/roles/update", data=dict(role_id="some-id"), follow_redirects=True, ) roles = Role.query.count() assert response.status_code == 200 assert request.path == f"{module_info["url_prefix"]}/roles" assert b"Unable to update. Role does not exist" in response.data assert roles == 0 def test_admin_roles_update_existing_role_post(self, test_client): new_role = Role.create(name="new-role1") response = test_client.post( f"{module_info["url_prefix"]}/roles/update", data=dict(role_id=new_role.id, role_name="update-role"), follow_redirects=True, ) role = Role.query.scalar() assert response.status_code == 200 assert request.path == f"{module_info["url_prefix"]}/roles" assert b"Role successfully updated" in response.data assert role is not None assert role.name == "update-role"
""" This file (test_admin.py) contains the functional tests for the `admin` blueprint. These tests use GETs and POSTs to different endpoints to check for the proper behavior of the `admin` blueprint. """ import os import json import pytest from flask import request from flask import url_for from modules.box__default.admin.models import Role from modules.box__default.admin.models import User from modules.box__default.admin.models import role_user_link dirpath = os.path.dirname(os.path.abspath(__file__)) module_path = os.path.dirname(dirpath) module_info = None with open(os.path.join(module_path, "info.json")) as f: module_info = json.load(f) class TestAdminInvalidAuth: """ Test all admin routes for correct user authentication """ routes_get = [ "/", "/add", "/delete/<id>", "/edit/<id>", "/roles", "/roles/<role_id>/delete", ] routes_post = ["/update", "/roles/update", "/roles/add", "/add"] @pytest.mark.parametrize("route", routes_get) def test_redirect_if_not_logged_in_get(self, test_client, route, auth): auth.logout() response = test_client.get( f"{module_info['url_prefix']}{route}", follow_redirects=True ) assert response.status_code == 200 assert request.path == url_for("auth.login") @pytest.mark.parametrize("route", routes_post) def test_redirect_if_not_logged_in_post(self, test_client, route, auth): auth.logout() response = test_client.post( f"{module_info['url_prefix']}{route}", follow_redirects=True ) assert response.status_code == 200 assert request.path == url_for("auth.login") @pytest.mark.usefixtures("login_non_admin_user") @pytest.mark.parametrize("route", routes_get) def test_no_admin_access_if_not_admin_get(self, test_client, route): response = test_client.get( f"{module_info['url_prefix']}{route}", follow_redirects=True ) assert response.status_code == 200 assert request.path == url_for("dashboard.index") assert b"You need to be an admin to view this page" in response.data @pytest.mark.usefixtures("login_non_admin_user") 
@pytest.mark.parametrize("route", routes_post) def test_no_admin_access_if_not_admin_post(self, test_client, route): response = test_client.post( f"{module_info['url_prefix']}{route}", follow_redirects=True ) assert response.status_code == 200 assert request.path == url_for("dashboard.index") assert b"You need to be an admin to view this page" in response.data @pytest.mark.usefixtures("login_admin_user") class TestAdminAPI: """ Test all admin api post and get requests """ def test_admin_user_list_get(self, test_client): response = test_client.get(f"{module_info['url_prefix']}/") assert response.status_code == 200 assert b"Admin" in response.data assert b"id" in response.data assert b"Email" in response.data assert b"Password" in response.data assert b"Roles" in response.data def test_admin_add_get(self, test_client): response = test_client.get(f"{module_info['url_prefix']}/add") assert response.status_code == 200 assert b"Email" in response.data assert b"Password" in response.data assert b"First Name" in response.data assert b"Last Name" in response.data assert b"Admin User" in response.data def test_admin_add_unique_user_post(self, test_client): role1 = Role.create(name="test1-role") role2 = Role.create(name="test2-role") data = { "email": "test@gmail.com", "password": "pass", "first_name": "Test", "last_name": "User", "is_admin": "", f"role_{role1.id}": "", f"role_{role2.id}": "", } test_client.post( f"{module_info['url_prefix']}/add", data=data, follow_redirects=True, ) test_user = User.query.filter(User.email == "test@gmail.com").scalar() assert test_user is not None assert test_user.first_name == "Test" assert test_user.last_name == "User" assert test_user.is_admin is False assert test_user.roles is not None assert len(test_user.roles) == 2 assert role1.users[0].email == "test@gmail.com" assert role2.users[0].email == "test@gmail.com" def test_admin_add_existing_user_post(self, test_client): User.create(email="test@gmail.com", password="pass") data = { 
"email": "test@gmail.com", "password": "pass", "first_name": "Test", "last_name": "User", "is_admin": "", } response = test_client.post( f"{module_info['url_prefix']}/add", data=data, follow_redirects=True, ) test_users = User.query.filter(User.email == "test@gmail.com").count() assert response.status_code == 200 assert b"User with same email already exists" in response.data assert test_users == 1 def test_admin_delete_existing_user_get(self, test_client): user = User(email="test@gmail.com", password="pass") role1 = Role(name="test1-role") role2 = Role(name="test2-role") user.roles = [role1, role2] user.save() response = test_client.get( f"{module_info['url_prefix']}/delete/{user.id}", follow_redirects=True, ) test_user = User.query.filter(User.email == user.email).scalar() test_roles = Role.query.count() user_role = ( User.query.join(role_user_link) .join(Role) .filter(User.id == user.id) .scalar() ) assert response.status_code == 200 assert test_user is None assert user_role is None assert test_roles == 2 def test_admin_delete_nonexisting_user_get(self, test_client): response = test_client.get( f"{module_info['url_prefix']}/delete/some_id", follow_redirects=True, ) assert response.status_code == 200 assert b"Unable to delete. 
Invalid user id" in response.data def test_admin_edit_existing_user_get(self, test_client): user = User.create(email="test@gmail.com", password="pass") response = test_client.get( f"{module_info['url_prefix']}/edit/{user.id}", ) assert response.status_code == 200 assert b"test@gmail.com" in response.data assert b"Edit User" in response.data def test_admin_edit_nonexisting_user_get(self, test_client): response = test_client.get( f"{module_info['url_prefix']}/edit/some-id", follow_redirects=True ) assert response.status_code == 200 assert b"Invalid user id" in response.data assert request.path == f"{module_info['url_prefix']}/" def test_admin_update_user_adding_new_roles_to_user(self, test_client): user = User.create(email="foo@gmail.com", password="pass") role1 = Role.create(name="test1-role") role2 = Role.create(name="test2-role") data = { "id": str(user.id), "email": "bar@gmail.com", "password": "newpass", "first_name": "Test", "last_name": "User", "is_admin": "", f"role_{role1.id}": "", f"role_{role2.id}": "", } response = test_client.post( f"{module_info['url_prefix']}/update", data=data, follow_redirects=True, ) assert response.status_code == 200 assert user.email == "bar@gmail.com" assert user.check_hash("newpass") assert user.first_name == "Test" assert user.last_name == "User" assert len(user.roles) == 2 assert role1.users[0].email == "bar@gmail.com" assert role2.users[0].email == "bar@gmail.com" def test_admin_update_user_remove_old_roles_from_user(self, test_client): user = User(email="foo@gmail.com", password="pass", is_admin=True) user.set_hash("pass") user.is_admin = True role1 = Role(name="test1-role") role2 = Role(name="test2-role") user.roles = [role1, role2] user.save() data = { "id": str(user.id), "email": "bar@gmail.com", "first_name": "Test", "last_name": "User", "password": " ", "is_admin": None, } response = test_client.post( f"{module_info['url_prefix']}/update", data=data, follow_redirects=True, ) assert response.status_code == 200 assert 
user.email == "bar@gmail.com" assert user.check_hash("pass") assert len(user.roles) == 0 assert len(role1.users) == 0 assert len(role2.users) == 0 def test_admin_roles_get(self, test_client): response = test_client.get(f"{module_info['url_prefix']}/roles") assert response.status_code == 200 assert b"Roles" in response.data def test_admin_roles_add_nonexisting_role_post(self, test_client): response = test_client.post( f"{module_info['url_prefix']}/roles/add", data=dict(name="new-role"), follow_redirects=True, ) role = Role.query.filter(Role.name == "new-role").scalar() role_count = Role.query.count() assert response.status_code == 200 assert role is not None assert role_count == 1 def test_admin_roles_add_existing_role_post(self, test_client): Role.create(name="new-role") response = test_client.post( f"{module_info['url_prefix']}/roles/add", data=dict(name="new-role"), follow_redirects=True, ) role_count = Role.query.count() role = Role.query.filter(Role.name == "new-role").scalar() assert response.status_code == 200 assert b"Role already exists" in response.data assert role is not None assert role_count == 1 def test_admin_roles_delete_nonexisting_role_get(self, test_client): response = test_client.get( f"{module_info['url_prefix']}/roles/some-id/delete", follow_redirects=True, ) assert response.status_code == 200 assert request.path == f"{module_info['url_prefix']}/roles" assert b"Unable to delete. 
Invalid role id" in response.data def test_admin_roles_delete_existing_role_get(self, test_client): role1 = Role.create(name="new-role1") role2 = Role.create(name="new-role2") response = test_client.get( f"{module_info['url_prefix']}/roles/{role1.id}/delete", follow_redirects=True, ) roles = Role.query.all() assert response.status_code == 200 assert request.path == f"{module_info['url_prefix']}/roles" assert b"Role successfully deleted" in response.data assert roles is not None assert roles[0].name == role2.name assert len(roles) == 1 def test_admin_roles_update_nonexisting_role_post(self, test_client): response = test_client.post( f"{module_info['url_prefix']}/roles/update", data=dict(role_id="some-id"), follow_redirects=True, ) roles = Role.query.count() assert response.status_code == 200 assert request.path == f"{module_info['url_prefix']}/roles" assert b"Unable to update. Role does not exist" in response.data assert roles == 0 def test_admin_roles_update_existing_role_post(self, test_client): new_role = Role.create(name="new-role1") response = test_client.post( f"{module_info['url_prefix']}/roles/update", data=dict(role_id=new_role.id, role_name="update-role"), follow_redirects=True, ) role = Role.query.scalar() assert response.status_code == 200 assert request.path == f"{module_info['url_prefix']}/roles" assert b"Role successfully updated" in response.data assert role is not None assert role.name == "update-role"
import re

import pytest

from invoke.context import Context

from test.test_utils import LOGGER, ec2, get_framework_and_version_from_tag


# NOTE: container-name f-strings below use single quotes inside the
# replacement fields — reusing the outer double quote is a SyntaxError
# on Python versions before 3.12 (PEP 701).


def test_stray_files(image):
    """
    Test to ensure that unnecessary build artifacts are not present in any easily visible or tmp directories

    :param image: ECR image URI
    """
    ctx = Context()
    container_name = f"test_tmp_dirs-{image.split('/')[-1].replace('.', '-').replace(':', '-')}"
    _start_container(container_name, image, ctx)

    # Running list of artifacts/artifact regular expressions we do not want in any of the directories
    stray_artifacts = [r"\.py"]

    # Running list of allowed files in the /tmp directory
    allowed_tmp_files = ["hsperfdata_root"]

    # Ensure stray artifacts are not in the tmp directory
    tmp = _run_cmd_on_container(container_name, ctx, "ls -A /tmp")
    _assert_artifact_free(tmp, stray_artifacts)

    # Ensure tmp dir is empty except for whitelisted files
    tmp_files = tmp.stdout.split()
    for tmp_file in tmp_files:
        assert tmp_file in allowed_tmp_files, f"Found unexpected file in tmp dir: {tmp_file}. " \
                                              f"Allowed tmp files: {allowed_tmp_files}"

    # We always expect /var/tmp to be empty
    var_tmp = _run_cmd_on_container(container_name, ctx, "ls -A /var/tmp")
    _assert_artifact_free(var_tmp, stray_artifacts)
    assert var_tmp.stdout.strip() == ""

    # Additional check of home and root directories to ensure that stray artifacts are not present
    home = _run_cmd_on_container(container_name, ctx, "ls -A ~")
    _assert_artifact_free(home, stray_artifacts)

    root = _run_cmd_on_container(container_name, ctx, "ls -A /")
    _assert_artifact_free(root, stray_artifacts)


def test_python_version(image):
    """
    Check that the python version in the image tag is the same as the one on a running container.

    :param image: ECR image URI
    """
    ctx = Context()
    container_name = f"py-version-{image.split('/')[-1].replace('.', '-').replace(':', '-')}"

    # Derive "Python X.Y" from the pyXY fragment of the image tag.
    py_version = ""
    for tag_split in image.split('-'):
        if tag_split.startswith('py'):
            py_version = f"Python {tag_split[2]}.{tag_split[3]}"
    _start_container(container_name, image, ctx)
    output = _run_cmd_on_container(container_name, ctx, "python --version")

    container_py_version = output.stdout
    # Due to py2 deprecation, Python2 version gets streamed to stderr. Python installed via Conda also appears to
    # stream to stderr, hence the pytorch condition.
    if "Python 2" in py_version or "pytorch" in image:
        container_py_version = output.stderr

    assert py_version in container_py_version, f"Cannot find {py_version} in {container_py_version}"


def test_ubuntu_version(image):
    """
    Check that the ubuntu version in the image tag is the same as the one on a running container.

    :param image: ECR image URI
    """
    ctx = Context()
    container_name = f"ubuntu-version-{image.split('/')[-1].replace('.', '-').replace(':', '-')}"

    ubuntu_version = ""
    for tag_split in image.split('-'):
        if tag_split.startswith('ubuntu'):
            ubuntu_version = tag_split.split("ubuntu")[-1]

    _start_container(container_name, image, ctx)
    output = _run_cmd_on_container(container_name, ctx, "cat /etc/os-release")
    container_ubuntu_version = output.stdout

    assert "Ubuntu" in container_ubuntu_version
    assert ubuntu_version in container_ubuntu_version


def test_framework_version_cpu(cpu):
    """
    Check that the framework version in the image tag is the same as the one on a running container.

    :param cpu: ECR image URI with "cpu" in the name
    """
    image = cpu
    if "tensorflow-inference" in image:
        pytest.skip(msg="TF inference does not have core tensorflow installed")
    tested_framework, tag_framework_version = get_framework_and_version_from_tag(image)

    # Module name is torch
    if tested_framework == "pytorch":
        tested_framework = "torch"
    ctx = Context()
    container_name = f"framework-version-{image.split('/')[-1].replace('.', '-').replace(':', '-')}"
    _start_container(container_name, image, ctx)
    output = _run_cmd_on_container(
        container_name, ctx, f"import {tested_framework}; print({tested_framework}.__version__)", executable="python"
    )
    assert tag_framework_version == output.stdout.strip()


@pytest.mark.parametrize("ec2_instance_type", ['p2.xlarge'], indirect=True)
def test_framework_version_gpu(gpu, ec2_connection):
    """
    Check that the framework version in the image tag is the same as the one on a running container.

    :param gpu: ECR image URI with "gpu" in the name
    :param ec2_connection: fixture to establish connection with an ec2 instance
    """
    image = gpu
    if "tensorflow-inference" in image:
        pytest.skip(msg="TF inference does not have core tensorflow installed")
    tested_framework, tag_framework_version = get_framework_and_version_from_tag(image)

    # Module name is "torch"
    if tested_framework == "pytorch":
        tested_framework = "torch"
    cmd = f'import {tested_framework}; print({tested_framework}.__version__)'
    output = ec2.execute_ec2_training_test(ec2_connection, image, cmd, executable="python")

    assert tag_framework_version == output.stdout.strip()


@pytest.mark.canary("Run pip check test regularly on production images")
def test_pip_check(image):
    """
    Ensure there are no broken requirements on the containers by running "pip check"

    :param image: ECR image URI
    """
    # Add null entrypoint to ensure command exits immediately
    ctx = Context()
    gpu_suffix = '-gpu' if 'gpu' in image else ''

    # TF inference containers do not have core tensorflow installed by design. Allowing for this pip check error
    # to occur in order to catch other pip check issues that may be associated with TF inference
    allowed_exception = re.compile(rf'^tensorflow-serving-api{gpu_suffix} \d\.\d+\.\d+ requires '
                                   rf'tensorflow{gpu_suffix}, which is not installed.$')
    output = ctx.run(f"docker run --entrypoint='' {image} pip check", hide=True, warn=True)
    if output.return_code != 0:
        if not allowed_exception.match(output.stdout):
            # Rerun pip check test if this is an unexpected failure
            ctx.run(f"docker run --entrypoint='' {image} pip check", hide=True)


def _start_container(container_name, image_uri, context):
    """
    Helper function to start a container locally

    :param container_name: Name of the docker container
    :param image_uri: ECR image URI
    :param context: Invoke context object
    """
    context.run(
        f"docker run --entrypoint='/bin/bash' --name {container_name} -itd {image_uri}",
        hide=True,
    )


def _run_cmd_on_container(container_name, context, cmd, executable="bash"):
    """
    Helper function to run commands on a locally running container

    :param container_name: Name of the docker container
    :param context: ECR image URI
    :param cmd: Command to run on the container
    :param executable: Executable to run on the container (bash or python)
    :return: invoke output, can be used to parse stdout, etc
    """
    if executable not in ("bash", "python"):
        LOGGER.warn(f"Unrecognized executable {executable}. It will be run as {executable} -c '{cmd}'")
    return context.run(f"docker exec --user root {container_name} {executable} -c '{cmd}'", hide=True, timeout=30)


def _assert_artifact_free(output, stray_artifacts):
    """
    Manage looping through assertions to determine that directories don't have known stray files.

    :param output: Invoke result object
    :param stray_artifacts: List of things that should not be present in these directories
    """
    for artifact in stray_artifacts:
        assert not re.search(artifact, output.stdout), \
            f"Matched {artifact} in {output.stdout} while running {output.command}"
import re import pytest from invoke.context import Context from test.test_utils import LOGGER, ec2, get_framework_and_version_from_tag def test_stray_files(image): """ Test to ensure that unnecessary build artifacts are not present in any easily visible or tmp directories :param image: ECR image URI """ ctx = Context() container_name = f"test_tmp_dirs-{image.split('/')[-1].replace('.', '-').replace(':', '-')}" _start_container(container_name, image, ctx) # Running list of artifacts/artifact regular expressions we do not want in any of the directories stray_artifacts = [r"\.py"] # Running list of allowed files in the /tmp directory allowed_tmp_files = ["hsperfdata_root"] # Ensure stray artifacts are not in the tmp directory tmp = _run_cmd_on_container(container_name, ctx, "ls -A /tmp") _assert_artifact_free(tmp, stray_artifacts) # Ensure tmp dir is empty except for whitelisted files tmp_files = tmp.stdout.split() for tmp_file in tmp_files: assert tmp_file in allowed_tmp_files, f"Found unexpected file in tmp dir: {tmp_file}. " \ f"Allowed tmp files: {allowed_tmp_files}" # We always expect /var/tmp to be empty var_tmp = _run_cmd_on_container(container_name, ctx, "ls -A /var/tmp") _assert_artifact_free(var_tmp, stray_artifacts) assert var_tmp.stdout.strip() == "" # Additional check of home and root directories to ensure that stray artifacts are not present home = _run_cmd_on_container(container_name, ctx, "ls -A ~") _assert_artifact_free(home, stray_artifacts) root = _run_cmd_on_container(container_name, ctx, "ls -A /") _assert_artifact_free(root, stray_artifacts) def test_python_version(image): """ Check that the python version in the image tag is the same as the one on a running container. 
:param image: ECR image URI """ ctx = Context() container_name = f"py-version-{image.split('/')[-1].replace('.', '-').replace(':', '-')}" py_version = "" for tag_split in image.split('-'): if tag_split.startswith('py'): py_version = f"Python {tag_split[2]}.{tag_split[3]}" _start_container(container_name, image, ctx) output = _run_cmd_on_container(container_name, ctx, "python --version") container_py_version = output.stdout # Due to py2 deprecation, Python2 version gets streamed to stderr. Python installed via Conda also appears to # stream to stderr, hence the pytorch condition. if "Python 2" in py_version or "pytorch" in image: container_py_version = output.stderr assert py_version in container_py_version, f"Cannot find {py_version} in {container_py_version}" def test_ubuntu_version(image): """ Check that the ubuntu version in the image tag is the same as the one on a running container. :param image: ECR image URI """ ctx = Context() container_name = f"ubuntu-version-{image.split('/')[-1].replace('.', '-').replace(':', '-')}" ubuntu_version = "" for tag_split in image.split('-'): if tag_split.startswith('ubuntu'): ubuntu_version = tag_split.split("ubuntu")[-1] _start_container(container_name, image, ctx) output = _run_cmd_on_container(container_name, ctx, "cat /etc/os-release") container_ubuntu_version = output.stdout assert "Ubuntu" in container_ubuntu_version assert ubuntu_version in container_ubuntu_version def test_framework_version_cpu(cpu): """ Check that the framework version in the image tag is the same as the one on a running container. 
:param cpu: ECR image URI with "cpu" in the name """ image = cpu if "tensorflow-inference" in image: pytest.skip(msg="TF inference does not have core tensorflow installed") tested_framework, tag_framework_version = get_framework_and_version_from_tag(image) # Module name is torch if tested_framework == "pytorch": tested_framework = "torch" ctx = Context() container_name = f"framework-version-{image.split('/')[-1].replace('.', '-').replace(':', '-')}" _start_container(container_name, image, ctx) output = _run_cmd_on_container( container_name, ctx, f"import {tested_framework}; print({tested_framework}.__version__)", executable="python" ) assert tag_framework_version == output.stdout.strip() @pytest.mark.parametrize("ec2_instance_type", ['p2.xlarge'], indirect=True) def test_framework_version_gpu(gpu, ec2_connection): """ Check that the framework version in the image tag is the same as the one on a running container. :param gpu: ECR image URI with "gpu" in the name :param ec2_connection: fixture to establish connection with an ec2 instance """ image = gpu if "tensorflow-inference" in image: pytest.skip(msg="TF inference does not have core tensorflow installed") tested_framework, tag_framework_version = get_framework_and_version_from_tag(image) # Module name is "torch" if tested_framework == "pytorch": tested_framework = "torch" cmd = f'import {tested_framework}; print({tested_framework}.__version__)' output = ec2.execute_ec2_training_test(ec2_connection, image, cmd, executable="python") assert tag_framework_version == output.stdout.strip() @pytest.mark.canary("Run pip check test regularly on production images") def test_pip_check(image): """ Ensure there are no broken requirements on the containers by running "pip check" :param image: ECR image URI """ # Add null entrypoint to ensure command exits immediately ctx = Context() gpu_suffix = '-gpu' if 'gpu' in image else '' # TF inference containers do not have core tensorflow installed by design. 
Allowing for this pip check error # to occur in order to catch other pip check issues that may be associated with TF inference allowed_exception = re.compile(rf'^tensorflow-serving-api{gpu_suffix} \d\.\d+\.\d+ requires ' rf'tensorflow{gpu_suffix}, which is not installed.$') output = ctx.run(f"docker run --entrypoint='' {image} pip check", hide=True, warn=True) if output.return_code != 0: if not allowed_exception.match(output.stdout): # Rerun pip check test if this is an unexpected failure ctx.run(f"docker run --entrypoint='' {image} pip check", hide=True) def _start_container(container_name, image_uri, context): """ Helper function to start a container locally :param container_name: Name of the docker container :param image_uri: ECR image URI :param context: Invoke context object """ context.run( f"docker run --entrypoint='/bin/bash' --name {container_name} -itd {image_uri}", hide=True, ) def _run_cmd_on_container(container_name, context, cmd, executable="bash"): """ Helper function to run commands on a locally running container :param container_name: Name of the docker container :param context: ECR image URI :param cmd: Command to run on the container :param executable: Executable to run on the container (bash or python) :return: invoke output, can be used to parse stdout, etc """ if executable not in ("bash", "python"): LOGGER.warn(f"Unrecognized executable {executable}. It will be run as {executable} -c '{cmd}'") return context.run(f"docker exec --user root {container_name} {executable} -c '{cmd}'", hide=True, timeout=30) def _assert_artifact_free(output, stray_artifacts): """ Manage looping through assertions to determine that directories don't have known stray files. :param output: Invoke result object :param stray_artifacts: List of things that should not be present in these directories """ for artifact in stray_artifacts: assert not re.search(artifact, output.stdout), \ f"Matched {artifact} in {output.stdout} while running {output.command}"
""" Функции команд бота. """ import logging import random import re from typing import List, Dict import pkg_resources import requests from sqlalchemy.orm import selectinload from telegram import Update, ParseMode, InlineKeyboardMarkup, InlineKeyboardButton, ChatAction from telegram.ext import CallbackContext from pod042_bot import models, vk_client, utils HTML_ANEK_REGEX = re.compile(r'<meta name="description" content="(.*?)">', re.DOTALL) log = logging.getLogger('pod042-bot') def abort(update: Update, context: CallbackContext): """Сбрасывает текущее состояние чата.""" if update.channel_post: return with models.session_scope() as session: chat: models.Chat = session.query(models.Chat).get(update.effective_chat.id) if chat.state == models.ChatState.NONE: update.message.reply_text('Я ничем не занят!') else: chat.state = models.ChatState.NONE update.message.reply_text('Отменено.') def start(update: Update, context: CallbackContext): """Простое приветствие!""" log.info(f'User #{update.effective_user.id} started bot') update.message.reply_text('Ура, я запущен!') def everyone(update: Update, context: CallbackContext): """Упоминает всех в чате.""" if update.channel_post: return with models.session_scope() as session: users = session.query(models.Chat) \ .options(selectinload(models.Chat.users)) \ .get(update.effective_chat.id) \ .users user: models.User usernames = '' for user in users: if user.username: usernames += f'@{user.username} ' if usernames: update.effective_chat.send_message(usernames) else: update.effective_chat.send_message('Никого не знаю!') def config(update: Update, context: CallbackContext): """Входит в режим конфигурации.""" with models.session_scope() as session: chat: models.Chat = session.query(models.Chat).get(update.effective_chat.id) chat.state = models.ChatState.CONFIG vk_groups = '' for group in chat.vk_groups: vk_groups += f'{group.url_name} ({group.url_name})\n' if not vk_groups: vk_groups = 'Пусто!' 
msg = (f'Вошел в режим конфигурации.\n' f'/abort для отмены.\n\n' f'Текущие группы ВК: ```\n{vk_groups}```') update.message.reply_text(msg, parse_mode=ParseMode.MARKDOWN, reply_markup=InlineKeyboardMarkup( [ [InlineKeyboardButton('Изменить группы ВК', callback_data='vk_config'), ], ] )) def vk_pic(update: Update, context: CallbackContext): """Возвращает случайно выбранное медиа из настроенных для чата групп ВКонтакте.""" with models.session_scope() as session: chat: models.Chat = session.query(models.Chat).get(update.effective_chat.id) groups = chat.vk_groups if not groups: update.message.reply_text('Сначала настройте группы с помощью /config!') return context.bot.send_chat_action(update.effective_chat.id, ChatAction.UPLOAD_PHOTO) chosen_group: models.VkGroup = random.choice(groups) log.debug(f'Selected {chosen_group}') response: List[Dict] = vk_client.vk_tools.get_all('wall.get', max_count=10, values={ 'domain': chosen_group.url_name, 'fields': 'attachments', 'version': vk_client.VK_VER, }, limit=250)['items'] media_url = '' while not media_url: post = random.choice(response) if post.get('marked_as_ads', False): log.debug('Skipping ad') continue if 'attachments' not in post: log.debug('Skipping post w/o attachs') continue for attach in post['attachments']: if 'doc' in attach and attach['doc']['ext'] == 'gif': log.debug('Found gif!') media_url = attach['doc']['url'] break elif 'photo' in attach: log.debug('Found picture!') sizes_list: List[Dict] = attach['photo']['sizes'] avail_codes = map(lambda e: e['type'], sizes_list) if 'w' in avail_codes: code = 'w' elif 'z' in avail_codes: code = 'z' elif 'y' in avail_codes: code = 'y' else: continue element = next(i for i in sizes_list if i['type'] == code) if not element: continue media_url = element['url'] # update.message.reply_text(f'{media_url}\n' # f'Из https://vk.com/{chosen_group.url_name}') update.message.reply_photo(photo=media_url, caption=f'Из https://vk.com/{chosen_group.url_name}') def codfish(update: Update, 
context: CallbackContext): """Бьет треской по лицу выбранных пользователей. С видео!""" args = context.args bot = context.bot if not args: update.message.reply_text('Неверный формат команды. Пиши `/codfish @user_name1 @user_name2 ...`!', parse_mode=ParseMode.MARKDOWN) return bot.send_chat_action(update.effective_chat.id, ChatAction.RECORD_VIDEO) with models.session_scope() as session: chat: models.Chat = session.query(models.Chat).get(update.effective_chat.id) result = utils.get_names(args, session, chat) with pkg_resources.resource_stream('pod042_bot.resources.videos', 'codfish.mp4') as f: if any(x in args for x in ('@all', '@everyone', '@room')): bot.send_video(update.effective_chat.id, f, caption='Отпиздил треской всю комнату, да и себя ебанул, для профилактики.') elif len(args) == 1 and (args[0][1:] == bot.username or args[0][1:] == bot.first_name): bot.send_video(update.effective_chat.id, f, caption='Хорошенько пизданул себя треской.') else: if not result: update.message.reply_text('Не смог никого вспомнить...') return if bot.username in args or bot.first_name in args: bot.send_video( update.effective_chat.id, f, caption=f'Со всего размаху пизданул треской ' f'{', '.join(result)}, да и для себя трески не пожалел.' ) else: bot.send_video(update.effective_chat.id, f, caption=f'Со всего размаху пизданул треской {', '.join(result)}.') def pat(update: Update, context: CallbackContext): """Гладит указанных пользователей. Да, тоже с видео!""" args = context.args bot = context.bot if not args: update.message.reply_text('Неверный формат команды. 
Пиши `/pat @user_name1 @user_name2 ...`!', parse_mode=ParseMode.MARKDOWN) return bot.send_chat_action(update.effective_chat.id, ChatAction.RECORD_VIDEO) with models.session_scope() as session: chat: models.Chat = session.query(models.Chat).get(update.effective_chat.id) result = utils.get_names(args, session, chat) with pkg_resources.resource_stream('pod042_bot.resources.videos', 'pat.mp4') as f: if any(x in args for x in ('@all', '@everyone', '@room')): bot.send_video(update.effective_chat.id, f, caption='Ментально погладил всех в комнате!') elif len(args) == 1 and (args[0][1:] == bot.username or args[0][1:] == bot.first_name): bot.send_video(update.effective_chat.id, f, caption='Сам себя не погладишь – никто не погладит...') else: if not result: update.message.reply_text('Не смог никого вспомнить...') return if bot.username in args or bot.first_name in args: bot.send_video(update.effective_chat.id, f, caption=f'Ментально погладил {', '.join(result)}, да и себя не обидел!') else: bot.send_video(update.effective_chat.id, f, caption=f'Ментально погладил {', '.join(result)}!') def anek(update: Update, context: CallbackContext): """Присылает рандомный анекдот с baneks.ru.""" response = requests.get(f'https://baneks.ru/{random.randrange(1, 1142)}') response.encoding = 'utf-8' matches = HTML_ANEK_REGEX.search(response.text) result = matches.group(1) if matches else 'Ошибка...' update.message.reply_text(f'<code>{result}</code>', parse_mode=ParseMode.HTML) def quote(update: Update, context: CallbackContext): """Присылает рандомную цитату с tproger.com.""" result = requests.get('https://tproger.ru/wp-content/plugins/citation-widget/get-quote.php').text update.message.reply_text(f'<code>{result}</code>', parse_mode=ParseMode.HTML)
""" Функции команд бота. """ import logging import random import re from typing import List, Dict import pkg_resources import requests from sqlalchemy.orm import selectinload from telegram import Update, ParseMode, InlineKeyboardMarkup, InlineKeyboardButton, ChatAction from telegram.ext import CallbackContext from pod042_bot import models, vk_client, utils HTML_ANEK_REGEX = re.compile(r'<meta name="description" content="(.*?)">', re.DOTALL) log = logging.getLogger('pod042-bot') def abort(update: Update, context: CallbackContext): """Сбрасывает текущее состояние чата.""" if update.channel_post: return with models.session_scope() as session: chat: models.Chat = session.query(models.Chat).get(update.effective_chat.id) if chat.state == models.ChatState.NONE: update.message.reply_text('Я ничем не занят!') else: chat.state = models.ChatState.NONE update.message.reply_text('Отменено.') def start(update: Update, context: CallbackContext): """Простое приветствие!""" log.info(f'User #{update.effective_user.id} started bot') update.message.reply_text('Ура, я запущен!') def everyone(update: Update, context: CallbackContext): """Упоминает всех в чате.""" if update.channel_post: return with models.session_scope() as session: users = session.query(models.Chat) \ .options(selectinload(models.Chat.users)) \ .get(update.effective_chat.id) \ .users user: models.User usernames = '' for user in users: if user.username: usernames += f'@{user.username} ' if usernames: update.effective_chat.send_message(usernames) else: update.effective_chat.send_message('Никого не знаю!') def config(update: Update, context: CallbackContext): """Входит в режим конфигурации.""" with models.session_scope() as session: chat: models.Chat = session.query(models.Chat).get(update.effective_chat.id) chat.state = models.ChatState.CONFIG vk_groups = '' for group in chat.vk_groups: vk_groups += f'{group.url_name} ({group.url_name})\n' if not vk_groups: vk_groups = 'Пусто!' 
msg = (f'Вошел в режим конфигурации.\n' f'/abort для отмены.\n\n' f'Текущие группы ВК: ```\n{vk_groups}```') update.message.reply_text(msg, parse_mode=ParseMode.MARKDOWN, reply_markup=InlineKeyboardMarkup( [ [InlineKeyboardButton('Изменить группы ВК', callback_data='vk_config'), ], ] )) def vk_pic(update: Update, context: CallbackContext): """Возвращает случайно выбранное медиа из настроенных для чата групп ВКонтакте.""" with models.session_scope() as session: chat: models.Chat = session.query(models.Chat).get(update.effective_chat.id) groups = chat.vk_groups if not groups: update.message.reply_text('Сначала настройте группы с помощью /config!') return context.bot.send_chat_action(update.effective_chat.id, ChatAction.UPLOAD_PHOTO) chosen_group: models.VkGroup = random.choice(groups) log.debug(f'Selected {chosen_group}') response: List[Dict] = vk_client.vk_tools.get_all('wall.get', max_count=10, values={ 'domain': chosen_group.url_name, 'fields': 'attachments', 'version': vk_client.VK_VER, }, limit=250)['items'] media_url = '' while not media_url: post = random.choice(response) if post.get('marked_as_ads', False): log.debug('Skipping ad') continue if 'attachments' not in post: log.debug('Skipping post w/o attachs') continue for attach in post['attachments']: if 'doc' in attach and attach['doc']['ext'] == 'gif': log.debug('Found gif!') media_url = attach['doc']['url'] break elif 'photo' in attach: log.debug('Found picture!') sizes_list: List[Dict] = attach['photo']['sizes'] avail_codes = map(lambda e: e['type'], sizes_list) if 'w' in avail_codes: code = 'w' elif 'z' in avail_codes: code = 'z' elif 'y' in avail_codes: code = 'y' else: continue element = next(i for i in sizes_list if i['type'] == code) if not element: continue media_url = element['url'] # update.message.reply_text(f'{media_url}\n' # f'Из https://vk.com/{chosen_group.url_name}') update.message.reply_photo(photo=media_url, caption=f'Из https://vk.com/{chosen_group.url_name}') def codfish(update: Update, 
context: CallbackContext): """Бьет треской по лицу выбранных пользователей. С видео!""" args = context.args bot = context.bot if not args: update.message.reply_text('Неверный формат команды. Пиши `/codfish @user_name1 @user_name2 ...`!', parse_mode=ParseMode.MARKDOWN) return bot.send_chat_action(update.effective_chat.id, ChatAction.RECORD_VIDEO) with models.session_scope() as session: chat: models.Chat = session.query(models.Chat).get(update.effective_chat.id) result = utils.get_names(args, session, chat) with pkg_resources.resource_stream('pod042_bot.resources.videos', 'codfish.mp4') as f: if any(x in args for x in ('@all', '@everyone', '@room')): bot.send_video(update.effective_chat.id, f, caption='Отпиздил треской всю комнату, да и себя ебанул, для профилактики.') elif len(args) == 1 and (args[0][1:] == bot.username or args[0][1:] == bot.first_name): bot.send_video(update.effective_chat.id, f, caption='Хорошенько пизданул себя треской.') else: if not result: update.message.reply_text('Не смог никого вспомнить...') return if bot.username in args or bot.first_name in args: bot.send_video( update.effective_chat.id, f, caption=f'Со всего размаху пизданул треской ' f'{", ".join(result)}, да и для себя трески не пожалел.' ) else: bot.send_video(update.effective_chat.id, f, caption=f'Со всего размаху пизданул треской {", ".join(result)}.') def pat(update: Update, context: CallbackContext): """Гладит указанных пользователей. Да, тоже с видео!""" args = context.args bot = context.bot if not args: update.message.reply_text('Неверный формат команды. 
Пиши `/pat @user_name1 @user_name2 ...`!', parse_mode=ParseMode.MARKDOWN) return bot.send_chat_action(update.effective_chat.id, ChatAction.RECORD_VIDEO) with models.session_scope() as session: chat: models.Chat = session.query(models.Chat).get(update.effective_chat.id) result = utils.get_names(args, session, chat) with pkg_resources.resource_stream('pod042_bot.resources.videos', 'pat.mp4') as f: if any(x in args for x in ('@all', '@everyone', '@room')): bot.send_video(update.effective_chat.id, f, caption='Ментально погладил всех в комнате!') elif len(args) == 1 and (args[0][1:] == bot.username or args[0][1:] == bot.first_name): bot.send_video(update.effective_chat.id, f, caption='Сам себя не погладишь – никто не погладит...') else: if not result: update.message.reply_text('Не смог никого вспомнить...') return if bot.username in args or bot.first_name in args: bot.send_video(update.effective_chat.id, f, caption=f'Ментально погладил {", ".join(result)}, да и себя не обидел!') else: bot.send_video(update.effective_chat.id, f, caption=f'Ментально погладил {", ".join(result)}!') def anek(update: Update, context: CallbackContext): """Присылает рандомный анекдот с baneks.ru.""" response = requests.get(f'https://baneks.ru/{random.randrange(1, 1142)}') response.encoding = 'utf-8' matches = HTML_ANEK_REGEX.search(response.text) result = matches.group(1) if matches else 'Ошибка...' update.message.reply_text(f'<code>{result}</code>', parse_mode=ParseMode.HTML) def quote(update: Update, context: CallbackContext): """Присылает рандомную цитату с tproger.com.""" result = requests.get('https://tproger.ru/wp-content/plugins/citation-widget/get-quote.php').text update.message.reply_text(f'<code>{result}</code>', parse_mode=ParseMode.HTML)
""" The MIT License (MIT) Copyright (c) 2021 - Oxy Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" # -*- coding: utf-8 -*- import time import requests as rs url = "https://discord.com/api/v6/" class Client(): def __init__(self,token): self.token = token global headers headers = { 'authorization':token, 'content-type':'application/json' } class guild(): def join(self,invite:str): self.invite = invite res = rs.post(url + f"invites/{invite}",headers=headers) data = res.json() if res.status_code == 200: try: print(f""" Joined Guild : {data['guild']['name']} \nInvited By : {data['inviter']['username']}#{data['inviter']['discriminator']} \n Invite Code : {data['code']} """) except KeyError: print(f""" Joined Guild : {data['guild']['name']} \n """) else: print(data) def leave(self,guildid): self.guildid = guildid r = rs.delete(url + f"users/@me/guilds/{guildid}",headers=headers) data = r.json() print(data) def createGuild(self,name): self.name = name json = { "name":name } r = rs.post(url + "guilds",headers=headers,json=json) print(r.json()) def getGuildInfo(self,guildid): self.guildid = guildid r = rs.get(url + f"guilds/{guildid}?with_counts=true",headers=headers) data = r.json() try: print(f""" Guild Information : \n Name : {data["name"]} \n ID : {data["id"]} \n Icon : {data["icon"]} \n Description : {data["description"]} \n Splash : {data["splash"]} \nFeatures : {data['features']} \n Banner : {data['banner']} \n Owner ID : {data['owner_id']} \n Application ID : {data['application_id']} \n Region : {data['region']} \n Afk channel ID : {data['afk_channel_id']} \n Afk Timeout : {data['afk_timeout']} \n System Channrl ID : {data[ "system_channel_id"]} \n Widget Enabled : {data["widget_enabled"]} \n Widget Channel ID : {data["widget_channel_id"]} \n Verifiction Level : {data[ "verification_level"]} \nMessage Level : {data["default_message_notifications"]} \n MFA level : {data["mfa_level"]} \n Explicit Content Filter : {data["explicit_content_filter"]} \n Vanity URL : {data["vanity_url_code"]} \n Boost Level : {data["premium_tier"]} \n Boosters : 
{data["premium_subscription_count"]} \n System Channel Flags : {data["system_channel_flags"]} \n Locale : {data["preferred_locale"]} \n Rules channel id : {data["rules_channel_id"]} \n """) except KeyError: print("KeyError") def editGuild(self,guildid,name): self.guildid = guildid self.name = name json = { "name":name } r = rs.patch(url + f"guilds/{guildid}",headers=headers,json=json) print(r.json()) def deleteGuild(self,guildid): self.guildid = guildid r = rs.delete(url + f"guilds/{guildid}",headers=headers) print(r.json()) def getGuildChannels(self,guildid): self.guildid = guildid r = rs.get(url + f"guilds/{guildid}/channels",headers=headers) data = r.json() try: for data in data: print(f"""Name : {data["name"]} \n ID : {data["id"]} \n Type : {data["type"]} \n Topic : {data["topic"]} NSFW : {data["nsfw"]}""") except KeyError: print("KeyError") def getGuildMember(self,userid,guildid): self.guildid = guildid self.userid = userid r = rs.get(url + f"guilds/{guildid}/members/{userid}",headers=headers) print(r.json()) def createGuildChannel(self,guildid,name,topic): self.guildid = guildid self.name = name self.topic = topic json = { 'name':str(name), 'topic':str(topic) } r = rs.post(url + f"guilds/{guildid}/channels",headers=headers,json=json) print(r.json()) def changeMyNick(self,guildid,nick): self.guildid = guildid self.nick = nick json = { 'nick':str(nick) } r = rs.patch(url + f'guilds/{guildid}/members/@me/nick',headers=headers,json=json) print(r.json()) def addRoleToMember(self,guildid,userid,roleid): self.guildid = guildid self.userid= userid self.roleid = roleid r = rs.put(url + f"guilds/{guildid}/members/{userid}/roles/{roleid}",headers=headers) print(r.json()) def removeMemberRole(self,guildid,userid,roleid): self.guildid = guildid self.userid= userid self.roleid = roleid r = rs.delete(url + f"guilds/{guildid}/members/{userid}/roles/{roleid}",headers=headers) print(r.json()) def kickMember(self,guildid,userid): self.guildid = guildid self.userid= userid r = 
rs.delete(url + f"guilds/{guild.id}/members/{userid}",headers=headers) print(r.json()) def getGuildBans(self,guildid): self.guildid = guildid try: r = rs.get(url + f"guilds/{guildid}/bans",headers=headers) data = r.json() for data in data: time.sleep(1) print( f'''Name : {data['user']['username']}#{data['user']['disriminator']} \n ID : {data['user']['id']} \n Reason : {data['reason']} ''' ) except KeyError: print("KeyError") def getGuildRoles(self,guildid): self.guilidid = guildid r = rs.get(url + f"guilds/{guildid}",headers=headers) data = r.json() for role in data['roles']: print(f"Role : \n Name : {role["name"]} \n ID : {role["id"]} \n Color : {role["color"]} \n Mentionable : {role["mentionable"]} \n Display Role : {role["hoist"]} \n Position : {role["postion"]} ") else: return def getGuildEmojis(self,guildid): self.guildid = guildid r = rs.get(url + f"guilds/{guildid}",headers=headers) data = r.json() for emojis in data['emojis']: print( f'''Emoji : \n Name : {emojis['name']} \n ID : {emojis['id']} \n Animated : {emojis['animated']} \n Available : {emojis['available']} ''' ) else: return def getUserBanInfo(self,guildid,userid): self.guildid = guildid self.userid = userid r = rs.get(url + f"guilds/{guildid}/bans/{userid}",headers=headers) data = r.json() try: print( f'''Name : {data['user']['username']}#{data['user']['disriminator']} \n ID : {data['user']['id']} \n Reason : {data['reason']} ''' ) except KeyError: print("KeyError") def banMember(self,guildid,userid,reason): self.guildid = guildid self.userid = userid self.reason = reason reason = { 'reason': str(reason) } r = rs.put(url + f"guilds/{guildid}/bans/{userid}",headers=headers,json=reason) print(r.json()) def removeBan(self,guildid,userid): self.guildid = guildid self.userid = userid r = rs.delete(url + f"guilds/{guildid}/bans/{userid}",headers=headers) print(r.json()) def createGuildRole(self,guildid,name,hoist:bool,mentionable:bool,color:int=0): """NOTE : PERMISSIONS IS NOT SUPPORTED BECAUSE THEY ARE 
BITWISE VALUES AND CANNOT BE USED BY NORMAL PEOPLES""" self.guildid = guildid self.name = name self.color = color self.hoist = hoist self.mentionable = mentionable param = { 'name':str(name), 'color': color, 'hoist': hoist, 'mentionable':mentionable } r = rs.post(url + f"guilds/{guildid}/roles",headers=headers,json=param) print(r.json()) def editGuildRole(self,guildid,roleid,name,hoist:bool,mentionable:bool,color:int=0): """NOTE : PERMISSIONS IS NOT SUPPORTED BECAUSE THEY ARE BITWISE VALUES AND CANNOT BE USED BY NORMAL PEOPLES""" self.guildid = guildid self.roleid = roleid self.name = name self.color = color self.hoist = hoist self.mentionable = mentionable json = { 'name':str(name), 'color': color, 'hoist': hoist, 'mentionable':mentionable } r = rs.patch(url + f"guilds/{guildid}/roles/{roleid}",headers=headers,json=json) print(r.json()) def deleteGuildRole(self,guildid,roleid): self.guildid = guildid self.roleid = roleid r = rs.delete(url + f"guilds/{guildid}/roles/{roleid}",headers=headers) print(r.json()) def getPruneCount(self,guildid): self.guilidid = guildid r = rs.get(url + f"guilds/{guildid}/prune",headers=headers) print(r.json()) def pruneMembers(self,guildid,reason,days:int): self.guildid = guildid self.days = days self.reason = reason json = { 'days':days, 'reason':reason } r = rs.post(url + f"guilds/{guildid}/prune",headers=headers,json=json) print(r.json()) def getGuildInvites(self,guildid): self.guildid = guildid r = rs.get(url + f"guilds/{guildid}/invites",headers=headers) data = r.json() try: for data in data: print( f''' Guild Information : \n Name : {data['guild']['name']} \n ID : {data['guild']['id']} \n Description : {data['guild']['description']} \n Banner : {data['guild']['banner']} \n Icon : {data['guild']['icon']} \n Verification Level : {data['guild']['verification_level']} \n Vanity : {data['guild']['vanity_url_code']} \n ''' ) time.sleep(1) print(f"""Invite Information: \n Invite code: {data["code"]} \n Uses : {data["uses"]} \n Max Age : 
{data["max_age"]} \n Max uses : {data["max_uses"]} \n Temporary : {data["temporary"]} \n Created : {data["created_at"]} \n """) time.sleep(1) print(f""" Inviter Info : \n \n Name : {data['inviter']['username']} \n {data['inviter']['id']} \n Tag : {data['inviter']['discriminator']} \n Avatar : {data['inviter']['avatar']} \n \n Channel : \n Name : {data['channel']['name']} \n ID : {data['channel']['id']} \n """) except KeyError: return def getGuildIntegrations(self,guildid): self.guildid = guildid r = rs.get(url + f"guilds/{guildid}/integrations",headers=headers) data = r.json() for data in data: print(f''' Integration Info : \n Name : {data['name']} \n ID : {data['id']} \n Type : {data['type']} \n Enbaled : {data['enabled']} \n ''') def getGuildIntegrationsRaw(self,guildid): self.guildid = guildid r = rs.get(url + f"guilds/{guildid}/integrations",headers=headers) data = r.json() print(data) def deleteIntegrations(self,guildid,integrationid): self.guildid = guildid self.integrationid = integrationid r = rs.delete(url + f"guilds/{guildid}/integrations/{integrationid}",headers=headers) print(r.json()) def getWidgetSetting(self,guildid): self.guildid = guildid r = rs.get(url + f"guilds/{guildid}/widget",headers=headers) print(r.json()) def editWidgetSetting(self,guildid,enable:bool,channelid): self.guildid = guildid self.enable = enable self.channelid = channelid json = { "enabled": enable, "channel_id":str(channelid) } r = rs.patch(url + f"guilds/{guildid}/widget",headers=headers,json=json) print(r.json()) def getWidget(self,guildid): self.guildid= guildid r = rs.get(url + f"guilds/{guildid}/widget.json",headers=headers) print(r.json()) class channel(): #CHANNEL OBJECT def getChannel(self,channel_id): self.channel_id = channel_id r = rs.get(url + f"channels/{channel_id}",headers=headers) data = r.json() print(r.json()) def editChannel(self,channelid,name,topic): self.channelid = channelid self.name = name self.topic = topic params = { 'name':str(name), 
'topic':str(topic) } try: r = rs.patch(url + f"channels/{channelid}",headers=headers,json=params) print(r.json()) except KeyError: print("KeyError") def getChannelMessages(self,channelid,limit:int=50): self.channelid = channelid self.limit = limit try: r = rs.get(url + f"channels/{channelid}/messages?limit={limit}",headers=headers) print(r.json()) except KeyError: print("KeyError") def getSpecificMessages(self,channelid,messageid): self.channelid = channelid self.messageid = messageid try: r = rs.get(url + f"channels/{channelid}/messages/{messageid}",headers=headers) print(r.json()) except KeyError: print("KeyError") def react(self,channelid,messageid,emojiname,emojiid): self.channelid = channelid self.messageid = messageid self.emojiname = emojiname self.emojiid = emojiid try: r = rs.put(url + f"channels/{channelid}/messages/{messageid}/reactions/{emojiname}:{emojiid}/@me",headers=headers) print(r.json()) except KeyError: print('KeyError') def unreact(self,channelid,messageid,emojiname,emojiid): self.channelid = channelid self.messageid = messageid self.emojiname = emojiname self.emojiid = emojiid r = rs.delete(url + f"channels/{channelid}/messages/{messageid}/reactions/{emojiname}:{emojiid}/@me",headers=headers) print(r.json()) def deleteUserReaction(self,channelid,messageid,emojiname,emojiid,userid): self.channelid = channelid self.messageid = messageid self.emojiname = emojiname self.emojiid = emojiid self.userid = userid r = rs.delete(url + f"channels/{channelid}/messages/{messageid}/reactions/{emojiname}:{emojiid}/{userid}",headers=headers) print(r.json()) def deleteReactionForEmoji(self,channelid,messageid,emojiname,emojiid): self.channelid = channelid self.messageid = messageid self.emojiname = emojiname self.emojiid = emojiid r = rs.delete(url + f"channels/{channelid}/messages/{messageid}/reactions/{emojiname}:{emojiid}",headers=headers) print(r.json()) def deleteAllReaction(self,channelid,messageid): self.channelid = channelid self.messageid = messageid r 
= rs.delete(url + f"channels/{channelid}/messages/{messageid}/reactions",headers=headers) print(r.json()) def deleteMessage(self,channelid,messageid): self.channelid = channelid self.messageid = messageid r = rs.delete(url + f"channels/{channelid}/messages/{messageid}",headers=headers) print(r.json()) def getChannelInvites(self,channelid): self.channelid = channelid r = rs.get(url + f"channels/{channelid}/invites",headers=headers) print(r.json()) def createChannelInvite(self,channelid:str): self.channelid = channelid json = { 'max_age':0 } r = rs.post(url + f"channels/{channelid}/invites",headers=headers,json=json) print("\n \n RAW DATA \n \n \n" , r.json()) print(f"INVITE CODE of {r.json()["guild"]["name"]} : " ,r.json()['code']) class emoji(): def getAllEmoji(self,guildid): self.guildid = guildid try: r = rs.get(url + f"guilds/{guildid}/emojis",headers=headers) key = r.json() for data in key: print(f''' \nEmoji Name : {data['name']} \nID: {data['id']} \n Animated : {data['animated']} \n Available : {data['available']} \n User Added : \n Name : {data['user']['username']} \n ID : {data['user']['id']} \n Tag : {data['user']['discriminator']} ''') except KeyError: print("KeyError") def getEmoji(self,guildid,emojiid): self.guildid = guildid self.emojiid = emojiid try: r = rs.get(url + f"guilds/{guildid}/emojis/{emojiid}",headers=headers) data = r.json() print(f''' \nEmoji Name : {data['name']} \nID: {data['id']} \n Animated : {data['animated']} \n Available : {data['available']} \n User Added : \n Name : {data['user']['username']} \n ID : {data['user']['id']} \n Tag : {data['user']['discriminator']} ''') except KeyError: print("KeyError") def editEmoji(self,guildid,emojiid,name): self.guildid = guildid self.emojiid = emojiid self.name = name try: json = { 'name':name } r = rs.patch(url + f"guilds/{guildid}/emojis/{emojiid}",headers=headers,json=json) print(r.json()) except KeyError: print('KeyError') def deleteEmoji(self,guildid,emojiid): self.guildid = guildid 
self.emojiid = emojiid r = rs.delete(url + f"guilds/{guildid}/emojis/{emojiid}",headers=headers) print(r.json()) class invite(): def getInfoInvite(self,invitecode): self.invitecode = invitecode r = rs.get(url + f"invites/{invitecode}",headers=headers) print(r.json()) def deleteInvite(self,invitecode): self.invitecode = invitecode r = rs.delete(url + f"invites/{invitecode}",headers=headers) print(r.json()) class auditlog(): def auditlogs(self,guildid,limit:int,actiontype:None): self.guildid = guildid self.limit = limit self.actiontype = actiontype if not limit and actiontype: r = rs.get(url + f"guilds/{guildid}/audit-logs",headers=headers) print(r.json()) elif actiontype == None: r = rs.get(url + f"guilds/{guildid}/audit-logs?limit=" + str(limit),headers=headers) print(r.json()) elif actiontype != None: r = rs.get(url + f"guilds/{guildid}/audit-logs?ation_type=" + str(actiontype),headers=headers) print(r.json()) def userAuditLogs(self,guildid,userid): self.guildid = guildid self.userid = userid if not userid == None: r = rs.get(url + f"guilds/{guildid}/audit-logs?limit=100?user_id" + str(userid),headers=headers) print(r.json()) else: print("Please put user id") class template(): def getInfoTemplate(self,templatecode:str): self.templatecode = templatecode r = rs.get(url + f"guilds/templates/{templatecode}",headers=headers) print(r.json()) def createTemplateGuild(self,templatecode): self.templatecode = templatecode r = rs.post(url + f"guilds/templates/{templatecode}",headers=headers) print(r.json()) def getGuildTemplate(self,guildid): self.guildid = guildid r = rs.get(url + f"guilds/{guildid}/templates",headers=headers) print(r.json()) def createGuildTemplate(self,guildid,name,description): self.name = name self.description = description self.guildid = guildid json = { 'name':str(name), 'desccrption': str(description) } r = rs.post(url + f"guilds/{guildid}/templates",headers=headers,json=json) print(r.json()) def syncTemplate(self,guildid,templatecode): self.guildid = 
guildid self.templatecode = templatecode r = rs.put(url + f"guilds/{guildid}/templates/{templatecode}",headers=headers) print(r.json()) def modifyTemplate(self,guildid,templatecode,name,description): self.guildid = guildid self.name = name self.description = description self.templatecode = templatecode json = { 'name': str(name), 'description':str(description) } r = rs.patch(url + f"guilds/{guildid}/templates/{templatecode}",headers=headers,json=json) print(r.json()) def deleteTemplate(self,guildid,templatecode): self.guildid = guildid self.templatecode = templatecode r = rs.delete(url + f"guilds/{guildid}/templates/{templatecode}",headers=headers) print(r.json()) class webhook(): def createWebhook(self,channelid,name): self.channelid = channelid self.name = name json = { 'name':str(name) } r = rs.post(url + f"channels/{channelid}/webhooks",headers=headers,json=json) print(r.json()) def getChannelWebhooks(self,channelid): self.channelid = channelid r = rs.get(url + f"channels/{channelid}/webhooks",headers=headers) print(r.json()) def getGuildWebhooks(self,guildid): self.guildid = guildid r = rs.get(url + f"guilds/{guildid}/webhooks",headers=headers) print(r.json()) def getWebhook(self,webhookid): self.wehookid = webhookid r = rs.get(url + f"webhooks/{webhookid}",headers=headers) print(r.json()) def getWebhookFromToken(self,webhookid,webhooktoken): self.webhookid = webhookid self.webhooktoken = webhooktoken r = rs.get(url + f"webhooks/{webhookid}/{webhooktoken}",headers=headers) print(r.json()) def editWebhook(self,webhookid,name,channelid): self.webhookid = webhookid self.name = name self.channelid = channelid json = { 'name':str(name), 'channel_id':(channelid) } r = rs.patch(url + f"webhooks/{webhookid}",headers=headers,json=json) print(r.json()) def deleteWebhook(self,webhookid): self.webhook = webhookid r = rs.delete(url + f"webhooks/{webhookid}",headers=headers) print(r.json()) def exeuteWebhook(self,webhookid,webhooktoken,text): self.webhookid = webhookid 
self.webhooktoken = webhooktoken self.text = text json = { 'content': str(text) } r = rs.post(url + f"webhooks/{webhookid}/{webhooktoken}",headers=headers,json=json) print(r.json()) def deleteWebhookMessage(self,webhookid,webhooktoken,messageid): self.messageid = messageid self.webhookid = webhookid self.webhooktoken = webhooktoken r = rs.delete(url + f"webhooks/{webhookid}/{webhooktoken}/messages/{messageid}",headers=headers) print(r.json()) class user(): def getMyInfo(self): r = rs.get(url + "users/@me",headers=headers) print(r.json()) def getUser(self,userid): self.userid = userid r = rs.get(url + f"users/{userid}",headers=headers) print(r.json()) def editUsername(self,name): self.name = name json = { 'name':str(name) } r = rs.patch(url + 'users/@me',headers=headers,json=json) print(r.json()) def editAvatar(self,avatar): self.avatar = avatar json = { 'avatar':avatar } r = rs.patch(url + 'users/@me',headers=headers,json=json) print(r.json()) def createDM(self,recipientid): self.recipientid = recipientid json = { 'recipient_id':recipientid } r = rs.post(url + f"users/@me/channels",headers=headers,json=json) print(r.json()) def getConnections(self): r = rs.get(url + "users/@me/connections",headers=headers) print(r.json())
""" The MIT License (MIT) Copyright (c) 2021 - Oxy Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" # -*- coding: utf-8 -*- import time import requests as rs url = "https://discord.com/api/v6/" class Client(): def __init__(self,token): self.token = token global headers headers = { 'authorization':token, 'content-type':'application/json' } class guild(): def join(self,invite:str): self.invite = invite res = rs.post(url + f"invites/{invite}",headers=headers) data = res.json() if res.status_code == 200: try: print(f""" Joined Guild : {data['guild']['name']} \nInvited By : {data['inviter']['username']}#{data['inviter']['discriminator']} \n Invite Code : {data['code']} """) except KeyError: print(f""" Joined Guild : {data['guild']['name']} \n """) else: print(data) def leave(self,guildid): self.guildid = guildid r = rs.delete(url + f"users/@me/guilds/{guildid}",headers=headers) data = r.json() print(data) def createGuild(self,name): self.name = name json = { "name":name } r = rs.post(url + "guilds",headers=headers,json=json) print(r.json()) def getGuildInfo(self,guildid): self.guildid = guildid r = rs.get(url + f"guilds/{guildid}?with_counts=true",headers=headers) data = r.json() try: print(f""" Guild Information : \n Name : {data['name']} \n ID : {data['id']} \n Icon : {data["icon"]} \n Description : {data["description"]} \n Splash : {data["splash"]} \nFeatures : {data['features']} \n Banner : {data['banner']} \n Owner ID : {data['owner_id']} \n Application ID : {data['application_id']} \n Region : {data['region']} \n Afk channel ID : {data['afk_channel_id']} \n Afk Timeout : {data['afk_timeout']} \n System Channrl ID : {data[ "system_channel_id"]} \n Widget Enabled : {data["widget_enabled"]} \n Widget Channel ID : {data["widget_channel_id"]} \n Verifiction Level : {data[ "verification_level"]} \nMessage Level : {data["default_message_notifications"]} \n MFA level : {data["mfa_level"]} \n Explicit Content Filter : {data["explicit_content_filter"]} \n Vanity URL : {data["vanity_url_code"]} \n Boost Level : {data["premium_tier"]} \n Boosters : 
{data["premium_subscription_count"]} \n System Channel Flags : {data["system_channel_flags"]} \n Locale : {data["preferred_locale"]} \n Rules channel id : {data["rules_channel_id"]} \n """) except KeyError: print("KeyError") def editGuild(self,guildid,name): self.guildid = guildid self.name = name json = { "name":name } r = rs.patch(url + f"guilds/{guildid}",headers=headers,json=json) print(r.json()) def deleteGuild(self,guildid): self.guildid = guildid r = rs.delete(url + f"guilds/{guildid}",headers=headers) print(r.json()) def getGuildChannels(self,guildid): self.guildid = guildid r = rs.get(url + f"guilds/{guildid}/channels",headers=headers) data = r.json() try: for data in data: print(f"""Name : {data['name']} \n ID : {data['id']} \n Type : {data['type']} \n Topic : {data['topic']} NSFW : {data['nsfw']}""") except KeyError: print("KeyError") def getGuildMember(self,userid,guildid): self.guildid = guildid self.userid = userid r = rs.get(url + f"guilds/{guildid}/members/{userid}",headers=headers) print(r.json()) def createGuildChannel(self,guildid,name,topic): self.guildid = guildid self.name = name self.topic = topic json = { 'name':str(name), 'topic':str(topic) } r = rs.post(url + f"guilds/{guildid}/channels",headers=headers,json=json) print(r.json()) def changeMyNick(self,guildid,nick): self.guildid = guildid self.nick = nick json = { 'nick':str(nick) } r = rs.patch(url + f'guilds/{guildid}/members/@me/nick',headers=headers,json=json) print(r.json()) def addRoleToMember(self,guildid,userid,roleid): self.guildid = guildid self.userid= userid self.roleid = roleid r = rs.put(url + f"guilds/{guildid}/members/{userid}/roles/{roleid}",headers=headers) print(r.json()) def removeMemberRole(self,guildid,userid,roleid): self.guildid = guildid self.userid= userid self.roleid = roleid r = rs.delete(url + f"guilds/{guildid}/members/{userid}/roles/{roleid}",headers=headers) print(r.json()) def kickMember(self,guildid,userid): self.guildid = guildid self.userid= userid r = 
rs.delete(url + f"guilds/{guild.id}/members/{userid}",headers=headers) print(r.json()) def getGuildBans(self,guildid): self.guildid = guildid try: r = rs.get(url + f"guilds/{guildid}/bans",headers=headers) data = r.json() for data in data: time.sleep(1) print( f'''Name : {data['user']['username']}#{data['user']['disriminator']} \n ID : {data['user']['id']} \n Reason : {data['reason']} ''' ) except KeyError: print("KeyError") def getGuildRoles(self,guildid): self.guilidid = guildid r = rs.get(url + f"guilds/{guildid}",headers=headers) data = r.json() for role in data['roles']: print(f"Role : \n Name : {role['name']} \n ID : {role['id']} \n Color : {role['color']} \n Mentionable : {role['mentionable']} \n Display Role : {role['hoist']} \n Position : {role['postion']} ") else: return def getGuildEmojis(self,guildid): self.guildid = guildid r = rs.get(url + f"guilds/{guildid}",headers=headers) data = r.json() for emojis in data['emojis']: print( f'''Emoji : \n Name : {emojis['name']} \n ID : {emojis['id']} \n Animated : {emojis['animated']} \n Available : {emojis['available']} ''' ) else: return def getUserBanInfo(self,guildid,userid): self.guildid = guildid self.userid = userid r = rs.get(url + f"guilds/{guildid}/bans/{userid}",headers=headers) data = r.json() try: print( f'''Name : {data['user']['username']}#{data['user']['disriminator']} \n ID : {data['user']['id']} \n Reason : {data['reason']} ''' ) except KeyError: print("KeyError") def banMember(self,guildid,userid,reason): self.guildid = guildid self.userid = userid self.reason = reason reason = { 'reason': str(reason) } r = rs.put(url + f"guilds/{guildid}/bans/{userid}",headers=headers,json=reason) print(r.json()) def removeBan(self,guildid,userid): self.guildid = guildid self.userid = userid r = rs.delete(url + f"guilds/{guildid}/bans/{userid}",headers=headers) print(r.json()) def createGuildRole(self,guildid,name,hoist:bool,mentionable:bool,color:int=0): """NOTE : PERMISSIONS IS NOT SUPPORTED BECAUSE THEY ARE 
BITWISE VALUES AND CANNOT BE USED BY NORMAL PEOPLES""" self.guildid = guildid self.name = name self.color = color self.hoist = hoist self.mentionable = mentionable param = { 'name':str(name), 'color': color, 'hoist': hoist, 'mentionable':mentionable } r = rs.post(url + f"guilds/{guildid}/roles",headers=headers,json=param) print(r.json()) def editGuildRole(self,guildid,roleid,name,hoist:bool,mentionable:bool,color:int=0): """NOTE : PERMISSIONS IS NOT SUPPORTED BECAUSE THEY ARE BITWISE VALUES AND CANNOT BE USED BY NORMAL PEOPLES""" self.guildid = guildid self.roleid = roleid self.name = name self.color = color self.hoist = hoist self.mentionable = mentionable json = { 'name':str(name), 'color': color, 'hoist': hoist, 'mentionable':mentionable } r = rs.patch(url + f"guilds/{guildid}/roles/{roleid}",headers=headers,json=json) print(r.json()) def deleteGuildRole(self,guildid,roleid): self.guildid = guildid self.roleid = roleid r = rs.delete(url + f"guilds/{guildid}/roles/{roleid}",headers=headers) print(r.json()) def getPruneCount(self,guildid): self.guilidid = guildid r = rs.get(url + f"guilds/{guildid}/prune",headers=headers) print(r.json()) def pruneMembers(self,guildid,reason,days:int): self.guildid = guildid self.days = days self.reason = reason json = { 'days':days, 'reason':reason } r = rs.post(url + f"guilds/{guildid}/prune",headers=headers,json=json) print(r.json()) def getGuildInvites(self,guildid): self.guildid = guildid r = rs.get(url + f"guilds/{guildid}/invites",headers=headers) data = r.json() try: for data in data: print( f''' Guild Information : \n Name : {data['guild']['name']} \n ID : {data['guild']['id']} \n Description : {data['guild']['description']} \n Banner : {data['guild']['banner']} \n Icon : {data['guild']['icon']} \n Verification Level : {data['guild']['verification_level']} \n Vanity : {data['guild']['vanity_url_code']} \n ''' ) time.sleep(1) print(f"""Invite Information: \n Invite code: {data['code']} \n Uses : {data['uses']} \n Max Age : 
{data['max_age']} \n Max uses : {data['max_uses']} \n Temporary : {data['temporary']} \n Created : {data['created_at']} \n """) time.sleep(1) print(f""" Inviter Info : \n \n Name : {data['inviter']['username']} \n {data['inviter']['id']} \n Tag : {data['inviter']['discriminator']} \n Avatar : {data['inviter']['avatar']} \n \n Channel : \n Name : {data['channel']['name']} \n ID : {data['channel']['id']} \n """) except KeyError: return def getGuildIntegrations(self,guildid): self.guildid = guildid r = rs.get(url + f"guilds/{guildid}/integrations",headers=headers) data = r.json() for data in data: print(f''' Integration Info : \n Name : {data['name']} \n ID : {data['id']} \n Type : {data['type']} \n Enbaled : {data['enabled']} \n ''') def getGuildIntegrationsRaw(self,guildid): self.guildid = guildid r = rs.get(url + f"guilds/{guildid}/integrations",headers=headers) data = r.json() print(data) def deleteIntegrations(self,guildid,integrationid): self.guildid = guildid self.integrationid = integrationid r = rs.delete(url + f"guilds/{guildid}/integrations/{integrationid}",headers=headers) print(r.json()) def getWidgetSetting(self,guildid): self.guildid = guildid r = rs.get(url + f"guilds/{guildid}/widget",headers=headers) print(r.json()) def editWidgetSetting(self,guildid,enable:bool,channelid): self.guildid = guildid self.enable = enable self.channelid = channelid json = { "enabled": enable, "channel_id":str(channelid) } r = rs.patch(url + f"guilds/{guildid}/widget",headers=headers,json=json) print(r.json()) def getWidget(self,guildid): self.guildid= guildid r = rs.get(url + f"guilds/{guildid}/widget.json",headers=headers) print(r.json()) class channel(): #CHANNEL OBJECT def getChannel(self,channel_id): self.channel_id = channel_id r = rs.get(url + f"channels/{channel_id}",headers=headers) data = r.json() print(r.json()) def editChannel(self,channelid,name,topic): self.channelid = channelid self.name = name self.topic = topic params = { 'name':str(name), 
'topic':str(topic) } try: r = rs.patch(url + f"channels/{channelid}",headers=headers,json=params) print(r.json()) except KeyError: print("KeyError") def getChannelMessages(self,channelid,limit:int=50): self.channelid = channelid self.limit = limit try: r = rs.get(url + f"channels/{channelid}/messages?limit={limit}",headers=headers) print(r.json()) except KeyError: print("KeyError") def getSpecificMessages(self,channelid,messageid): self.channelid = channelid self.messageid = messageid try: r = rs.get(url + f"channels/{channelid}/messages/{messageid}",headers=headers) print(r.json()) except KeyError: print("KeyError") def react(self,channelid,messageid,emojiname,emojiid): self.channelid = channelid self.messageid = messageid self.emojiname = emojiname self.emojiid = emojiid try: r = rs.put(url + f"channels/{channelid}/messages/{messageid}/reactions/{emojiname}:{emojiid}/@me",headers=headers) print(r.json()) except KeyError: print('KeyError') def unreact(self,channelid,messageid,emojiname,emojiid): self.channelid = channelid self.messageid = messageid self.emojiname = emojiname self.emojiid = emojiid r = rs.delete(url + f"channels/{channelid}/messages/{messageid}/reactions/{emojiname}:{emojiid}/@me",headers=headers) print(r.json()) def deleteUserReaction(self,channelid,messageid,emojiname,emojiid,userid): self.channelid = channelid self.messageid = messageid self.emojiname = emojiname self.emojiid = emojiid self.userid = userid r = rs.delete(url + f"channels/{channelid}/messages/{messageid}/reactions/{emojiname}:{emojiid}/{userid}",headers=headers) print(r.json()) def deleteReactionForEmoji(self,channelid,messageid,emojiname,emojiid): self.channelid = channelid self.messageid = messageid self.emojiname = emojiname self.emojiid = emojiid r = rs.delete(url + f"channels/{channelid}/messages/{messageid}/reactions/{emojiname}:{emojiid}",headers=headers) print(r.json()) def deleteAllReaction(self,channelid,messageid): self.channelid = channelid self.messageid = messageid r 
= rs.delete(url + f"channels/{channelid}/messages/{messageid}/reactions",headers=headers) print(r.json()) def deleteMessage(self,channelid,messageid): self.channelid = channelid self.messageid = messageid r = rs.delete(url + f"channels/{channelid}/messages/{messageid}",headers=headers) print(r.json()) def getChannelInvites(self,channelid): self.channelid = channelid r = rs.get(url + f"channels/{channelid}/invites",headers=headers) print(r.json()) def createChannelInvite(self,channelid:str): self.channelid = channelid json = { 'max_age':0 } r = rs.post(url + f"channels/{channelid}/invites",headers=headers,json=json) print("\n \n RAW DATA \n \n \n" , r.json()) print(f"INVITE CODE of {r.json()['guild']['name']} : " ,r.json()['code']) class emoji(): def getAllEmoji(self,guildid): self.guildid = guildid try: r = rs.get(url + f"guilds/{guildid}/emojis",headers=headers) key = r.json() for data in key: print(f''' \nEmoji Name : {data['name']} \nID: {data['id']} \n Animated : {data['animated']} \n Available : {data['available']} \n User Added : \n Name : {data['user']['username']} \n ID : {data['user']['id']} \n Tag : {data['user']['discriminator']} ''') except KeyError: print("KeyError") def getEmoji(self,guildid,emojiid): self.guildid = guildid self.emojiid = emojiid try: r = rs.get(url + f"guilds/{guildid}/emojis/{emojiid}",headers=headers) data = r.json() print(f''' \nEmoji Name : {data['name']} \nID: {data['id']} \n Animated : {data['animated']} \n Available : {data['available']} \n User Added : \n Name : {data['user']['username']} \n ID : {data['user']['id']} \n Tag : {data['user']['discriminator']} ''') except KeyError: print("KeyError") def editEmoji(self,guildid,emojiid,name): self.guildid = guildid self.emojiid = emojiid self.name = name try: json = { 'name':name } r = rs.patch(url + f"guilds/{guildid}/emojis/{emojiid}",headers=headers,json=json) print(r.json()) except KeyError: print('KeyError') def deleteEmoji(self,guildid,emojiid): self.guildid = guildid 
self.emojiid = emojiid r = rs.delete(url + f"guilds/{guildid}/emojis/{emojiid}",headers=headers) print(r.json()) class invite(): def getInfoInvite(self,invitecode): self.invitecode = invitecode r = rs.get(url + f"invites/{invitecode}",headers=headers) print(r.json()) def deleteInvite(self,invitecode): self.invitecode = invitecode r = rs.delete(url + f"invites/{invitecode}",headers=headers) print(r.json()) class auditlog(): def auditlogs(self,guildid,limit:int,actiontype:None): self.guildid = guildid self.limit = limit self.actiontype = actiontype if not limit and actiontype: r = rs.get(url + f"guilds/{guildid}/audit-logs",headers=headers) print(r.json()) elif actiontype == None: r = rs.get(url + f"guilds/{guildid}/audit-logs?limit=" + str(limit),headers=headers) print(r.json()) elif actiontype != None: r = rs.get(url + f"guilds/{guildid}/audit-logs?ation_type=" + str(actiontype),headers=headers) print(r.json()) def userAuditLogs(self,guildid,userid): self.guildid = guildid self.userid = userid if not userid == None: r = rs.get(url + f"guilds/{guildid}/audit-logs?limit=100?user_id" + str(userid),headers=headers) print(r.json()) else: print("Please put user id") class template(): def getInfoTemplate(self,templatecode:str): self.templatecode = templatecode r = rs.get(url + f"guilds/templates/{templatecode}",headers=headers) print(r.json()) def createTemplateGuild(self,templatecode): self.templatecode = templatecode r = rs.post(url + f"guilds/templates/{templatecode}",headers=headers) print(r.json()) def getGuildTemplate(self,guildid): self.guildid = guildid r = rs.get(url + f"guilds/{guildid}/templates",headers=headers) print(r.json()) def createGuildTemplate(self,guildid,name,description): self.name = name self.description = description self.guildid = guildid json = { 'name':str(name), 'desccrption': str(description) } r = rs.post(url + f"guilds/{guildid}/templates",headers=headers,json=json) print(r.json()) def syncTemplate(self,guildid,templatecode): self.guildid = 
guildid self.templatecode = templatecode r = rs.put(url + f"guilds/{guildid}/templates/{templatecode}",headers=headers) print(r.json()) def modifyTemplate(self,guildid,templatecode,name,description): self.guildid = guildid self.name = name self.description = description self.templatecode = templatecode json = { 'name': str(name), 'description':str(description) } r = rs.patch(url + f"guilds/{guildid}/templates/{templatecode}",headers=headers,json=json) print(r.json()) def deleteTemplate(self,guildid,templatecode): self.guildid = guildid self.templatecode = templatecode r = rs.delete(url + f"guilds/{guildid}/templates/{templatecode}",headers=headers) print(r.json()) class webhook(): def createWebhook(self,channelid,name): self.channelid = channelid self.name = name json = { 'name':str(name) } r = rs.post(url + f"channels/{channelid}/webhooks",headers=headers,json=json) print(r.json()) def getChannelWebhooks(self,channelid): self.channelid = channelid r = rs.get(url + f"channels/{channelid}/webhooks",headers=headers) print(r.json()) def getGuildWebhooks(self,guildid): self.guildid = guildid r = rs.get(url + f"guilds/{guildid}/webhooks",headers=headers) print(r.json()) def getWebhook(self,webhookid): self.wehookid = webhookid r = rs.get(url + f"webhooks/{webhookid}",headers=headers) print(r.json()) def getWebhookFromToken(self,webhookid,webhooktoken): self.webhookid = webhookid self.webhooktoken = webhooktoken r = rs.get(url + f"webhooks/{webhookid}/{webhooktoken}",headers=headers) print(r.json()) def editWebhook(self,webhookid,name,channelid): self.webhookid = webhookid self.name = name self.channelid = channelid json = { 'name':str(name), 'channel_id':(channelid) } r = rs.patch(url + f"webhooks/{webhookid}",headers=headers,json=json) print(r.json()) def deleteWebhook(self,webhookid): self.webhook = webhookid r = rs.delete(url + f"webhooks/{webhookid}",headers=headers) print(r.json()) def exeuteWebhook(self,webhookid,webhooktoken,text): self.webhookid = webhookid 
self.webhooktoken = webhooktoken self.text = text json = { 'content': str(text) } r = rs.post(url + f"webhooks/{webhookid}/{webhooktoken}",headers=headers,json=json) print(r.json()) def deleteWebhookMessage(self,webhookid,webhooktoken,messageid): self.messageid = messageid self.webhookid = webhookid self.webhooktoken = webhooktoken r = rs.delete(url + f"webhooks/{webhookid}/{webhooktoken}/messages/{messageid}",headers=headers) print(r.json()) class user(): def getMyInfo(self): r = rs.get(url + "users/@me",headers=headers) print(r.json()) def getUser(self,userid): self.userid = userid r = rs.get(url + f"users/{userid}",headers=headers) print(r.json()) def editUsername(self,name): self.name = name json = { 'name':str(name) } r = rs.patch(url + 'users/@me',headers=headers,json=json) print(r.json()) def editAvatar(self,avatar): self.avatar = avatar json = { 'avatar':avatar } r = rs.patch(url + 'users/@me',headers=headers,json=json) print(r.json()) def createDM(self,recipientid): self.recipientid = recipientid json = { 'recipient_id':recipientid } r = rs.post(url + f"users/@me/channels",headers=headers,json=json) print(r.json()) def getConnections(self): r = rs.get(url + "users/@me/connections",headers=headers) print(r.json())
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import os
import json

from torchvision import datasets, transforms
from torchvision.datasets.folder import ImageFolder, default_loader

from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.data import create_transform


class INatDataset(ImageFolder):
    """iNaturalist 2018/2019 dataset.

    Reads the official ``train{year}.json`` / ``val{year}.json`` annotation
    files plus ``categories.json`` and builds ``self.samples`` as
    ``(image_path, class_index)`` pairs, where class indices are assigned
    by first appearance of the chosen taxonomic ``category`` in the train
    annotations (so train and val share the same label mapping).
    """

    def __init__(
        self,
        root,
        train=True,
        year=2018,
        transform=None,
        target_transform=None,
        category="name",
        loader=default_loader,
    ):
        self.transform = transform
        self.loader = loader
        self.target_transform = target_transform
        self.year = year
        # assert category in ['kingdom','phylum','class','order','supercategory','family','genus','name']
        # FIX: f'{'train' if train else 'val'}...' reused the outer quote
        # character inside the replacement field, which is a SyntaxError on
        # every Python before 3.12 (PEP 701). Use inner double quotes.
        path_json = os.path.join(root, f'{"train" if train else "val"}{year}.json')
        with open(path_json) as json_file:
            data = json.load(json_file)

        with open(os.path.join(root, "categories.json")) as json_file:
            data_catg = json.load(json_file)

        # The label mapping is always derived from the *train* annotations.
        path_json_for_targeter = os.path.join(root, f"train{year}.json")
        with open(path_json_for_targeter) as json_file:
            data_for_targeter = json.load(json_file)

        # Map each category label to a dense index in order of first appearance.
        targeter = {}
        indexer = 0
        for elem in data_for_targeter["annotations"]:
            label = data_catg[int(elem["category_id"])][category]
            if label not in targeter:
                targeter[label] = indexer
                indexer += 1
        self.nb_classes = len(targeter)

        self.samples = []
        for elem in data["images"]:
            cut = elem["file_name"].split("/")
            target_current = int(cut[2])
            path_current = os.path.join(root, cut[0], cut[2], cut[3])

            categors = data_catg[target_current]
            target_current_true = targeter[categors[category]]
            self.samples.append((path_current, target_current_true))

    # __getitem__ and __len__ inherited from ImageFolder


def build_dataset(is_train, args):
    """Build the (dataset, num_classes) pair selected by ``args.data_set``.

    Raises:
        ValueError: if ``args.data_set`` names no known dataset (previously
        this fell through and died with a confusing NameError at return).
    """
    transform = build_transform(is_train, args)

    if args.data_set == "CIFAR":
        dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform)
        nb_classes = 100
    elif args.data_set == "CIFAR10":
        root = os.path.join(args.data_path, "train" if is_train else "val")
        dataset = datasets.ImageFolder(root, transform=transform)
        nb_classes = 10
    elif args.data_set == "IMNET":
        root = os.path.join(args.data_path, "train" if is_train else "val")
        dataset = datasets.ImageFolder(root, transform=transform)
        nb_classes = 1000
    elif args.data_set == "INAT":
        dataset = INatDataset(
            args.data_path,
            train=is_train,
            year=2018,
            category=args.inat_category,
            transform=transform,
        )
        nb_classes = dataset.nb_classes
    elif args.data_set == "INAT19":
        dataset = INatDataset(
            args.data_path,
            train=is_train,
            year=2019,
            category=args.inat_category,
            transform=transform,
        )
        nb_classes = dataset.nb_classes
    else:
        raise ValueError(f"Unknown dataset: {args.data_set}")

    return dataset, nb_classes


def build_transform(is_train, args):
    """Build the train or eval torchvision transform for ``args.input_size``."""
    resize_im = args.input_size > 32
    if is_train:
        # this should always dispatch to transforms_imagenet_train
        transform = create_transform(
            input_size=args.input_size,
            is_training=True,
            color_jitter=args.color_jitter,
            auto_augment=args.aa,
            interpolation=args.train_interpolation,
            re_prob=args.reprob,
            re_mode=args.remode,
            re_count=args.recount,
        )
        if not resize_im:
            # replace RandomResizedCropAndInterpolation with
            # RandomCrop for small (<=32px) inputs such as CIFAR
            transform.transforms[0] = transforms.RandomCrop(args.input_size, padding=4)
        return transform

    t = []
    if resize_im:
        size = int((256 / 224) * args.input_size)
        t.append(
            transforms.Resize(size, interpolation=3),
            # to maintain same ratio w.r.t. 224 images
        )
        t.append(transforms.CenterCrop(args.input_size))

    t.append(transforms.ToTensor())
    t.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD))
    return transforms.Compose(t)
# Copyright (c) 2015-present, Facebook, Inc. # All rights reserved. import os import json from torchvision import datasets, transforms from torchvision.datasets.folder import ImageFolder, default_loader from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.data import create_transform class INatDataset(ImageFolder): def __init__( self, root, train=True, year=2018, transform=None, target_transform=None, category="name", loader=default_loader, ): self.transform = transform self.loader = loader self.target_transform = target_transform self.year = year # assert category in ['kingdom','phylum','class','order','supercategory','family','genus','name'] path_json = os.path.join(root, f'{"train" if train else "val"}{year}.json') with open(path_json) as json_file: data = json.load(json_file) with open(os.path.join(root, "categories.json")) as json_file: data_catg = json.load(json_file) path_json_for_targeter = os.path.join(root, f"train{year}.json") with open(path_json_for_targeter) as json_file: data_for_targeter = json.load(json_file) targeter = {} indexer = 0 for elem in data_for_targeter["annotations"]: king = [] king.append(data_catg[int(elem["category_id"])][category]) if king[0] not in targeter.keys(): targeter[king[0]] = indexer indexer += 1 self.nb_classes = len(targeter) self.samples = [] for elem in data["images"]: cut = elem["file_name"].split("/") target_current = int(cut[2]) path_current = os.path.join(root, cut[0], cut[2], cut[3]) categors = data_catg[target_current] target_current_true = targeter[categors[category]] self.samples.append((path_current, target_current_true)) # __getitem__ and __len__ inherited from ImageFolder def build_dataset(is_train, args): transform = build_transform(is_train, args) if args.data_set == "CIFAR": dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform) nb_classes = 100 elif args.data_set == "CIFAR10": root = os.path.join(args.data_path, "train" if is_train else "val") 
dataset = datasets.ImageFolder(root, transform=transform) nb_classes = 10 elif args.data_set == "IMNET": root = os.path.join(args.data_path, "train" if is_train else "val") dataset = datasets.ImageFolder(root, transform=transform) nb_classes = 1000 elif args.data_set == "INAT": dataset = INatDataset( args.data_path, train=is_train, year=2018, category=args.inat_category, transform=transform, ) nb_classes = dataset.nb_classes elif args.data_set == "INAT19": dataset = INatDataset( args.data_path, train=is_train, year=2019, category=args.inat_category, transform=transform, ) nb_classes = dataset.nb_classes return dataset, nb_classes def build_transform(is_train, args): resize_im = args.input_size > 32 if is_train: # this should always dispatch to transforms_imagenet_train transform = create_transform( input_size=args.input_size, is_training=True, color_jitter=args.color_jitter, auto_augment=args.aa, interpolation=args.train_interpolation, re_prob=args.reprob, re_mode=args.remode, re_count=args.recount, ) if not resize_im: # replace RandomResizedCropAndInterpolation with # RandomCrop transform.transforms[0] = transforms.RandomCrop(args.input_size, padding=4) return transform t = [] if resize_im: size = int((256 / 224) * args.input_size) t.append( transforms.Resize(size, interpolation=3), # to maintain same ratio w.r.t. 224 images ) t.append(transforms.CenterCrop(args.input_size)) t.append(transforms.ToTensor()) t.append(transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)) return transforms.Compose(t)
#!/usr/bin/env python3 import subprocess import logging import copy import os import time from schema import VMDescriptionSchema, VMNameSchema from expose import ExposedClass, exposed, transformational from exception import VMRunningError, VMNotRunningError from threading import RLock from tap_device import TAPDevice from qmp import QMPMonitor from vnc import VNCAllocator QEMU_BINARY = "/usr/bin/qemu-system-x86_64" class VM(ExposedClass): description_schema = VMDescriptionSchema(many=False) name_schema = VMNameSchema(many=False) # From the few bad solutions this is the least worse def __init__(self, name: str, description: dict): self._logger = logging.getLogger("vm") self._description = self.description_schema.load(description) self._name = self.name_schema.load({'name': name})['name'] self._logger = logging.getLogger("vm").getChild(name) self._qmp = None self._tapdevs = [] self._process = None self._vnc_port = None self._lock = RLock() @staticmethod def _preexec(): # do not forward signals (Like. SIGINT, SIGTERM) os.setpgrp() def _poweroff_cleanup(self, timeout: int = 5): if self.is_running(): self._logger.info(f"Qemu process still running. Delaying cleanup. (max. {timeout}sec)") wait_started = time.time() while self.is_running(): time.sleep(1) if (time.time() - wait_started) > 5: self._logger.warning("Cleanup delay expired. Killing Qemu!") self._process.kill() self._logger.debug("Cleaning up...") for tapdev in self._tapdevs: tapdev.free() self._tapdevs = [] self._qmp.disconnect() # Fun fact: This will be called from the qmp process self._qmp = None def _enforce_vm_state(self, running: bool): if running != self.is_running(): if self.is_running(): raise VMRunningError() else: raise VMNotRunningError() def destroy(self): with self._lock: if self.is_running(): raise VMRunningError("Can not destory running VM") def autostart(self): """ Starts the VM, if it's marked as autostart. Otherwise does nothing. 
""" with self._lock: if self._description['autostart']: try: self.start() except VMRunningError: self._logger.debug("Not autostarting because already running... (wtf?)") @exposed @transformational def start(self): with self._lock: self._enforce_vm_state(False) self._logger.info("Starting VM...") # The VM is not running. It's safe to kill off the QMP Monitor if self._qmp and self._qmp.is_alive(): self._logger.warning("Closing a zombie QMP Monitor... (maybe the VM was still running?)") self._qmp.disconnect(cleanup=True) self._qmp.join() # === QEMU Setup === args = [QEMU_BINARY, '-monitor', 'none'] # Monitor none disables the QEMU command prompt # Could be set to telnet or other device args += ['-serial', 'null'] # could be leaved out to disable kvm args += ['-enable-kvm', '-cpu', 'host'] args += ['-name', self._name] # setup VNC if self._description['vnc']['enabled']: self._vnc_port = VNCAllocator.get_free_vnc_port() self._logger.debug(f"bindig VNC to :{self._vnc_port}") else: self._vnc_port = None self._logger.warning("Couldn't allocate a free port for VNC") if self._vnc_port: args += ['-vnc', f":{self._vnc_port}"] else: args += ['-display', 'none'] # Create QMP monitor self._qmp = QMPMonitor(self._logger) self._qmp.register_event_listener('SHUTDOWN', lambda data: self._poweroff_cleanup()) # meh args += ['-qmp', f"unix:{self._qmp.get_sock_path()},server,nowait"] # === Virtual Hardware Setup === hardware_desciption = self._description['hardware'] args += ['-m', str(hardware_desciption['ram'])] args += ['-smp', str(hardware_desciption['cpu'])] args += ['-boot', str(hardware_desciption['boot'])] # stup RTC args += ['-rtc'] if hardware_desciption['rtc_utc']: args += ['base=utc'] else: args += ['base=localtime'] # add media for media in hardware_desciption['media']: args += ['-drive', f"media={media["type"]},format={media["format"]},file={media["path"].replace(",",",,")},read-only={"on" if media["readonly"] else "off"}"] # add nic for network in 
hardware_desciption['network']: tapdev = TAPDevice(network['master']) self._tapdevs.append(tapdev) netdevid = f"{self._name}net{len(self._tapdevs)-1}" args += ['-netdev', f"tap,id={netdevid},ifname={tapdev.device},script=no,downscript=no"] args += ['-device', f"{network["model"]},netdev={netdevid},mac={network["mac"]}"] # === Everything prepared... launch the QEMU process === self._logger.debug(f"Executing command {" ".join(args)}") self._process = subprocess.Popen(args, preexec_fn=VM._preexec) # start the qemu process itself self._qmp.start() # Start the QMP monitor @exposed def poweroff(self): with self._lock: self._enforce_vm_state(True) self._logger.info("Powering off VM...") try: self._qmp.send_command({"execute": "system_powerdown"}) except ConnectionError: # There was a QMP connection error... Sending SIGTERM to process instead self._logger.warning("There was a QMP connection error while attempting to power off the VM. Sending SIGTERM to QEMU instead...") self.terminate(False) @exposed def terminate(self, kill=False): with self._lock: self._enforce_vm_state(True) self._logger.warning("VM is being terminated...") self._qmp.disconnect(cleanup=kill) if kill: self._process.kill() self._poweroff_cleanup() else: self._process.terminate() # Poweroff cleanup will be triggered by QMP event @exposed def reset(self): with self._lock: self._enforce_vm_state(True) self._logger.info("Resetting VM...") self._qmp.send_command({"execute": "system_reset"}) @exposed def pause(self): with self._lock: self._enforce_vm_state(True) self._logger.info("Pausing VM...") self._qmp.send_command({"execute": "stop"}) @exposed def cont(self): # continue with self._lock: self._enforce_vm_state(True) self._logger.info("Continuing VM...") self._qmp.send_command({"execute": "cont"}) @exposed def get_name(self) -> str: with self._lock: return self._name @exposed def get_vnc_port(self) -> int: with self._lock: self._enforce_vm_state(True) return self._vnc_port @exposed def is_running(self) -> 
bool: with self._lock: if not self._process: return False # the process object exists return self._process.poll() is None @exposed def dump_description(self) -> dict: with self._lock: return self.description_schema.dump(self._description) @exposed @transformational def update_description(self, new_description: dict): """ Replaces the current description with the supplied one """ with self._lock: self._enforce_vm_state(False) self._description = self.description_schema.load(new_description)
#!/usr/bin/env python3 import subprocess import logging import copy import os import time from schema import VMDescriptionSchema, VMNameSchema from expose import ExposedClass, exposed, transformational from exception import VMRunningError, VMNotRunningError from threading import RLock from tap_device import TAPDevice from qmp import QMPMonitor from vnc import VNCAllocator QEMU_BINARY = "/usr/bin/qemu-system-x86_64" class VM(ExposedClass): description_schema = VMDescriptionSchema(many=False) name_schema = VMNameSchema(many=False) # From the few bad solutions this is the least worse def __init__(self, name: str, description: dict): self._logger = logging.getLogger("vm") self._description = self.description_schema.load(description) self._name = self.name_schema.load({'name': name})['name'] self._logger = logging.getLogger("vm").getChild(name) self._qmp = None self._tapdevs = [] self._process = None self._vnc_port = None self._lock = RLock() @staticmethod def _preexec(): # do not forward signals (Like. SIGINT, SIGTERM) os.setpgrp() def _poweroff_cleanup(self, timeout: int = 5): if self.is_running(): self._logger.info(f"Qemu process still running. Delaying cleanup. (max. {timeout}sec)") wait_started = time.time() while self.is_running(): time.sleep(1) if (time.time() - wait_started) > 5: self._logger.warning("Cleanup delay expired. Killing Qemu!") self._process.kill() self._logger.debug("Cleaning up...") for tapdev in self._tapdevs: tapdev.free() self._tapdevs = [] self._qmp.disconnect() # Fun fact: This will be called from the qmp process self._qmp = None def _enforce_vm_state(self, running: bool): if running != self.is_running(): if self.is_running(): raise VMRunningError() else: raise VMNotRunningError() def destroy(self): with self._lock: if self.is_running(): raise VMRunningError("Can not destory running VM") def autostart(self): """ Starts the VM, if it's marked as autostart. Otherwise does nothing. 
""" with self._lock: if self._description['autostart']: try: self.start() except VMRunningError: self._logger.debug("Not autostarting because already running... (wtf?)") @exposed @transformational def start(self): with self._lock: self._enforce_vm_state(False) self._logger.info("Starting VM...") # The VM is not running. It's safe to kill off the QMP Monitor if self._qmp and self._qmp.is_alive(): self._logger.warning("Closing a zombie QMP Monitor... (maybe the VM was still running?)") self._qmp.disconnect(cleanup=True) self._qmp.join() # === QEMU Setup === args = [QEMU_BINARY, '-monitor', 'none'] # Monitor none disables the QEMU command prompt # Could be set to telnet or other device args += ['-serial', 'null'] # could be leaved out to disable kvm args += ['-enable-kvm', '-cpu', 'host'] args += ['-name', self._name] # setup VNC if self._description['vnc']['enabled']: self._vnc_port = VNCAllocator.get_free_vnc_port() self._logger.debug(f"bindig VNC to :{self._vnc_port}") else: self._vnc_port = None self._logger.warning("Couldn't allocate a free port for VNC") if self._vnc_port: args += ['-vnc', f":{self._vnc_port}"] else: args += ['-display', 'none'] # Create QMP monitor self._qmp = QMPMonitor(self._logger) self._qmp.register_event_listener('SHUTDOWN', lambda data: self._poweroff_cleanup()) # meh args += ['-qmp', f"unix:{self._qmp.get_sock_path()},server,nowait"] # === Virtual Hardware Setup === hardware_desciption = self._description['hardware'] args += ['-m', str(hardware_desciption['ram'])] args += ['-smp', str(hardware_desciption['cpu'])] args += ['-boot', str(hardware_desciption['boot'])] # stup RTC args += ['-rtc'] if hardware_desciption['rtc_utc']: args += ['base=utc'] else: args += ['base=localtime'] # add media for media in hardware_desciption['media']: args += ['-drive', f"media={media['type']},format={media['format']},file={media['path'].replace(',',',,')},read-only={'on' if media['readonly'] else 'off'}"] # add nic for network in 
hardware_desciption['network']: tapdev = TAPDevice(network['master']) self._tapdevs.append(tapdev) netdevid = f"{self._name}net{len(self._tapdevs)-1}" args += ['-netdev', f"tap,id={netdevid},ifname={tapdev.device},script=no,downscript=no"] args += ['-device', f"{network['model']},netdev={netdevid},mac={network['mac']}"] # === Everything prepared... launch the QEMU process === self._logger.debug(f"Executing command {' '.join(args)}") self._process = subprocess.Popen(args, preexec_fn=VM._preexec) # start the qemu process itself self._qmp.start() # Start the QMP monitor @exposed def poweroff(self): with self._lock: self._enforce_vm_state(True) self._logger.info("Powering off VM...") try: self._qmp.send_command({"execute": "system_powerdown"}) except ConnectionError: # There was a QMP connection error... Sending SIGTERM to process instead self._logger.warning("There was a QMP connection error while attempting to power off the VM. Sending SIGTERM to QEMU instead...") self.terminate(False) @exposed def terminate(self, kill=False): with self._lock: self._enforce_vm_state(True) self._logger.warning("VM is being terminated...") self._qmp.disconnect(cleanup=kill) if kill: self._process.kill() self._poweroff_cleanup() else: self._process.terminate() # Poweroff cleanup will be triggered by QMP event @exposed def reset(self): with self._lock: self._enforce_vm_state(True) self._logger.info("Resetting VM...") self._qmp.send_command({"execute": "system_reset"}) @exposed def pause(self): with self._lock: self._enforce_vm_state(True) self._logger.info("Pausing VM...") self._qmp.send_command({"execute": "stop"}) @exposed def cont(self): # continue with self._lock: self._enforce_vm_state(True) self._logger.info("Continuing VM...") self._qmp.send_command({"execute": "cont"}) @exposed def get_name(self) -> str: with self._lock: return self._name @exposed def get_vnc_port(self) -> int: with self._lock: self._enforce_vm_state(True) return self._vnc_port @exposed def is_running(self) -> 
bool: with self._lock: if not self._process: return False # the process object exists return self._process.poll() is None @exposed def dump_description(self) -> dict: with self._lock: return self.description_schema.dump(self._description) @exposed @transformational def update_description(self, new_description: dict): """ Replaces the current description with the supplied one """ with self._lock: self._enforce_vm_state(False) self._description = self.description_schema.load(new_description)
#!python3 """ Main Meshtastic """ import argparse import platform import logging import sys import time import yaml from pubsub import pub import pyqrcode import pkg_resources import meshtastic.util import meshtastic.test from . import remote_hardware from . import portnums_pb2, channel_pb2, radioconfig_pb2 from .globals import Globals have_tunnel = platform.system() == 'Linux' """We only import the tunnel code if we are on a platform that can run it. """ def onReceive(packet, interface): """Callback invoked when a packet arrives""" our_globals = Globals.getInstance() args = our_globals.get_args() try: d = packet.get('decoded') logging.debug(f'in onReceive() d:{d}') # Exit once we receive a reply if args and args.sendtext and packet["to"] == interface.myInfo.my_node_num and d["portnum"] == portnums_pb2.PortNum.TEXT_MESSAGE_APP: interface.close() # after running command then exit # Reply to every received message with some stats if args and args.reply: msg = d.get('text') if msg: rxSnr = packet['rxSnr'] hopLimit = packet['hopLimit'] print(f"message: {msg}") reply = "got msg \'{}\' with rxSnr: {} and hopLimit: {}".format(msg, rxSnr, hopLimit) print("Sending reply: ", reply) interface.sendText(reply) except Exception as ex: print(ex) def onConnection(interface, topic=pub.AUTO_TOPIC): """Callback invoked when we connect/disconnect from a radio""" print(f"Connection changed: {topic.getName()}") def getPref(attributes, name): """Get a channel or preferences value""" objDesc = attributes.DESCRIPTOR field = objDesc.fields_by_name.get(name) if not field: print(f"{attributes.__class__.__name__} does not have an attribute called {name}, so you can not get it.") print(f"Choices in sorted order are:") names = [] for f in objDesc.fields: names.append(f'{f.name}') for temp_name in sorted(names): print(f" {temp_name}") return # okay - try to read the value try: try: val = getattr(attributes, name) except TypeError: # The getter didn't like our arg type guess try again as a string 
val = getattr(attributes, name) # succeeded! print(f"{name}: {str(val)}") except Exception as ex: print(f"Can't get {name} due to {ex}") def setPref(attributes, name, valStr): """Set a channel or preferences value""" objDesc = attributes.DESCRIPTOR field = objDesc.fields_by_name.get(name) if not field: print(f"{attributes.__class__.__name__} does not have an attribute called {name}, so you can not set it.") print(f"Choices in sorted order are:") names = [] for f in objDesc.fields: names.append(f'{f.name}') for temp_name in sorted(names): print(f" {temp_name}") return val = meshtastic.util.fromStr(valStr) enumType = field.enum_type # pylint: disable=C0123 if enumType and type(val) == str: # We've failed so far to convert this string into an enum, try to find it by reflection e = enumType.values_by_name.get(val) if e: val = e.number else: print(f"{name} does not have an enum called {val}, so you can not set it.") print(f"Choices in sorted order are:") names = [] for f in enumType.values: names.append(f'{f.name}') for temp_name in sorted(names): print(f" {temp_name}") return # okay - try to read the value try: try: setattr(attributes, name, val) except TypeError: # The setter didn't like our arg type guess try again as a string setattr(attributes, name, valStr) # succeeded! print(f"Set {name} to {valStr}") except Exception as ex: print(f"Can't set {name} due to {ex}") def onConnected(interface): """Callback invoked when we connect to a radio""" closeNow = False # Should we drop the connection after we finish? 
try: our_globals = Globals.getInstance() args = our_globals.get_args() print("Connected to radio") def getNode(): """This operation could be expensive, so we try to cache the results""" targetNode = our_globals.get_target_node() if not targetNode: targetNode = interface.getNode(args.destOrLocal) our_globals.set_target_node(targetNode) return targetNode if args.setlat or args.setlon or args.setalt: closeNow = True alt = 0 lat = 0.0 lon = 0.0 prefs = interface.localNode.radioConfig.preferences if args.setalt: alt = int(args.setalt) prefs.fixed_position = True print(f"Fixing altitude at {alt} meters") if args.setlat: lat = float(args.setlat) prefs.fixed_position = True print(f"Fixing latitude at {lat} degrees") if args.setlon: lon = float(args.setlon) prefs.fixed_position = True print(f"Fixing longitude at {lon} degrees") print("Setting device position") # can include lat/long/alt etc: latitude = 37.5, longitude = -122.1 interface.sendPosition(lat, lon, alt) interface.localNode.writeConfig() elif not args.no_time: # We normally provide a current time to the mesh when we connect interface.sendPosition() if args.set_owner: closeNow = True print(f"Setting device owner to {args.set_owner}") getNode().setOwner(args.set_owner) if args.pos_fields: # If --pos-fields invoked with args, set position fields closeNow = True prefs = getNode().radioConfig.preferences allFields = 0 try: for field in args.pos_fields: v_field = radioconfig_pb2.PositionFlags.Value(field) allFields |= v_field except ValueError: print("ERROR: supported position fields are:") print(radioconfig_pb2.PositionFlags.keys()) print("If no fields are specified, will read and display current value.") else: print(f"Setting position fields to {allFields}") setPref(prefs, 'position_flags', ('%d' % allFields)) print("Writing modified preferences to device") getNode().writeConfig() elif args.pos_fields is not None: # If --pos-fields invoked without args, read and display current value closeNow = True prefs = 
getNode().radioConfig.preferences fieldNames = [] for bit in radioconfig_pb2.PositionFlags.values(): if prefs.position_flags & bit: fieldNames.append(radioconfig_pb2.PositionFlags.Name(bit)) print(' '.join(fieldNames)) if args.set_team: closeNow = True try: v_team = meshtastic.mesh_pb2.Team.Value(args.set_team.upper()) except ValueError: v_team = 0 print(f"ERROR: Team \'{args.set_team}\' not found.") print("Try a team name from the sorted list below, or use 'CLEAR' for unaffiliated:") print(sorted(meshtastic.mesh_pb2.Team.keys())) else: print(f"Setting team to {meshtastic.mesh_pb2.Team.Name(v_team)}") getNode().setOwner(team=v_team) if args.set_ham: closeNow = True print(f"Setting Ham ID to {args.set_ham} and turning off encryption") getNode().setOwner(args.set_ham, is_licensed=True) # Must turn off encryption on primary channel getNode().turnOffEncryptionOnPrimaryChannel() if args.reboot: closeNow = True getNode().reboot() if args.sendtext: closeNow = True channelIndex = 0 if args.ch_index is not None: channelIndex = int(args.ch_index) ch = getNode().getChannelByChannelIndex(channelIndex) if ch and ch.role != channel_pb2.Channel.Role.DISABLED: print(f"Sending text message {args.sendtext} to {args.destOrAll} on channelIndex:{channelIndex}") interface.sendText(args.sendtext, args.destOrAll, wantAck=True, channelIndex=channelIndex) else: meshtastic.util.our_exit(f"Warning: {channelIndex} is not a valid channel. 
Channel must not be DISABLED.") if args.sendping: payload = str.encode("test string") print(f"Sending ping message to {args.destOrAll}") interface.sendData(payload, args.destOrAll, portNum=portnums_pb2.PortNum.REPLY_APP, wantAck=True, wantResponse=True) if args.gpio_wrb or args.gpio_rd or args.gpio_watch: rhc = remote_hardware.RemoteHardwareClient(interface) if args.gpio_wrb: bitmask = 0 bitval = 0 for wrpair in (args.gpio_wrb or []): bitmask |= 1 << int(wrpair[0]) bitval |= int(wrpair[1]) << int(wrpair[0]) print(f"Writing GPIO mask 0x{bitmask:x} with value 0x{bitval:x} to {args.dest}") rhc.writeGPIOs(args.dest, bitmask, bitval) closeNow = True if args.gpio_rd: bitmask = int(args.gpio_rd, 16) print(f"Reading GPIO mask 0x{bitmask:x} from {args.dest}") interface.mask = bitmask rhc.readGPIOs(args.dest, bitmask, None) if not interface.noProto: # wait up to X seconds for a response for _ in range(10): time.sleep(1) if interface.gotResponse: break logging.debug(f'end of gpio_rd') if args.gpio_watch: bitmask = int(args.gpio_watch, 16) print(f"Watching GPIO mask 0x{bitmask:x} from {args.dest}. 
Press ctrl-c to exit") while True: rhc.watchGPIOs(args.dest, bitmask) time.sleep(1) # handle settings if args.set: closeNow = True prefs = getNode().radioConfig.preferences # Handle the int/float/bool arguments for pref in args.set: setPref(prefs, pref[0], pref[1]) print("Writing modified preferences to device") getNode().writeConfig() if args.configure: with open(args.configure[0], encoding='utf8') as file: configuration = yaml.safe_load(file) closeNow = True if 'owner' in configuration: print(f"Setting device owner to {configuration["owner"]}") getNode().setOwner(configuration['owner']) if 'channel_url' in configuration: print("Setting channel url to", configuration['channel_url']) getNode().setURL(configuration['channel_url']) if 'location' in configuration: alt = 0 lat = 0.0 lon = 0.0 prefs = interface.localNode.radioConfig.preferences if 'alt' in configuration['location']: alt = int(configuration['location']['alt']) prefs.fixed_position = True print(f"Fixing altitude at {alt} meters") if 'lat' in configuration['location']: lat = float(configuration['location']['lat']) prefs.fixed_position = True print(f"Fixing latitude at {lat} degrees") if 'lon' in configuration['location']: lon = float(configuration['location']['lon']) prefs.fixed_position = True print(f"Fixing longitude at {lon} degrees") print("Setting device position") interface.sendPosition(lat, lon, alt) interface.localNode.writeConfig() if 'user_prefs' in configuration: prefs = getNode().radioConfig.preferences for pref in configuration['user_prefs']: setPref(prefs, pref, str(configuration['user_prefs'][pref])) print("Writing modified preferences to device") getNode().writeConfig() if args.export_config: # export the configuration (the opposite of '--configure') closeNow = True export_config(interface) if args.seturl: closeNow = True getNode().setURL(args.seturl) # handle changing channels if args.ch_add: closeNow = True if len(args.ch_add) > 10: meshtastic.util.our_exit("Warning: Channel name must be 
shorter. Channel not added.") n = getNode() ch = n.getChannelByName(args.ch_add) if ch: meshtastic.util.our_exit(f"Warning: This node already has a '{args.ch_add}' channel. No changes were made.") else: # get the first channel that is disabled (i.e., available) ch = n.getDisabledChannel() if not ch: meshtastic.util.our_exit("Warning: No free channels were found") chs = channel_pb2.ChannelSettings() chs.psk = meshtastic.util.genPSK256() chs.name = args.ch_add ch.settings.CopyFrom(chs) ch.role = channel_pb2.Channel.Role.SECONDARY print(f"Writing modified channels to device") n.writeChannel(ch.index) if args.ch_del: closeNow = True channelIndex = our_globals.get_channel_index() if channelIndex is None: meshtastic.util.our_exit("Warning: Need to specify '--ch-index' for '--ch-del'.", 1) else: if channelIndex == 0: meshtastic.util.our_exit("Warning: Cannot delete primary channel.", 1) else: print(f"Deleting channel {channelIndex}") ch = getNode().deleteChannel(channelIndex) ch_changes = [args.ch_longslow, args.ch_longfast, args.ch_mediumslow, args.ch_mediumfast, args.ch_shortslow, args.ch_shortfast] any_primary_channel_changes = any(x for x in ch_changes) if args.ch_set or any_primary_channel_changes or args.ch_enable or args.ch_disable: closeNow = True channelIndex = our_globals.get_channel_index() if channelIndex is None: if any_primary_channel_changes: # we assume that they want the primary channel if they're setting range values channelIndex = 0 else: meshtastic.util.our_exit("Warning: Need to specify '--ch-index'.", 1) ch = getNode().channels[channelIndex] if any_primary_channel_changes or args.ch_enable or args.ch_disable: if channelIndex == 0 and not any_primary_channel_changes: meshtastic.util.our_exit("Warning: Cannot enable/disable PRIMARY channel.") if channelIndex != 0: if any_primary_channel_changes: meshtastic.util.our_exit("Warning: Standard channel settings can only be applied to the PRIMARY channel") enable = True # default to enable if args.ch_enable: 
enable = True if args.ch_disable: enable = False def setSimpleChannel(modem_config): """Set one of the simple modem_config only based channels""" # Completely new channel settings chs = channel_pb2.ChannelSettings() chs.modem_config = modem_config chs.psk = bytes([1]) # Use default channel psk 1 ch.settings.CopyFrom(chs) # handle the simple channel set commands if args.ch_longslow: setSimpleChannel(channel_pb2.ChannelSettings.ModemConfig.Bw125Cr48Sf4096) if args.ch_longfast: setSimpleChannel(channel_pb2.ChannelSettings.ModemConfig.Bw31_25Cr48Sf512) if args.ch_mediumslow: setSimpleChannel(channel_pb2.ChannelSettings.ModemConfig.Bw250Cr46Sf2048) if args.ch_mediumfast: setSimpleChannel(channel_pb2.ChannelSettings.ModemConfig.Bw250Cr47Sf1024) if args.ch_shortslow: setSimpleChannel(channel_pb2.ChannelSettings.ModemConfig.Bw125Cr45Sf128) if args.ch_shortfast: setSimpleChannel(channel_pb2.ChannelSettings.ModemConfig.Bw500Cr45Sf128) # Handle the channel settings for pref in (args.ch_set or []): if pref[0] == "psk": ch.settings.psk = meshtastic.util.fromPSK(pref[1]) else: setPref(ch.settings, pref[0], pref[1]) enable = True # If we set any pref, assume the user wants to enable the channel if enable: ch.role = channel_pb2.Channel.Role.PRIMARY if ( channelIndex == 0) else channel_pb2.Channel.Role.SECONDARY else: ch.role = channel_pb2.Channel.Role.DISABLED print(f"Writing modified channels to device") getNode().writeChannel(channelIndex) if args.info: print("") if not args.dest: # If we aren't trying to talk to our local node, don't show it interface.showInfo() print("") getNode().showInfo() closeNow = True # FIXME, for now we leave the link up while talking to remote nodes print("") if args.get: closeNow = True prefs = getNode().radioConfig.preferences # Handle the int/float/bool arguments for pref in args.get: getPref(prefs, pref[0]) print("Completed getting preferences") if args.nodes: closeNow = True interface.showNodes() if args.qr: closeNow = True url = 
interface.localNode.getURL(includeAll=False) print(f"Primary channel URL {url}") qr = pyqrcode.create(url) print(qr.terminal()) if have_tunnel and args.tunnel: # pylint: disable=C0415 from . import tunnel # Even if others said we could close, stay open if the user asked for a tunnel closeNow = False tunnel.Tunnel(interface, subnet=args.tunnel_net) # if the user didn't ask for serial debugging output, we might want to exit after we've done our operation if (not args.seriallog) and closeNow: interface.close() # after running command then exit except Exception as ex: print(f"Aborting due to: {ex}") interface.close() # close the connection now, so that our app exits def onNode(node): """Callback invoked when the node DB changes""" print(f"Node changed: {node}") def subscribe(): """Subscribe to the topics the user probably wants to see, prints output to stdout""" pub.subscribe(onReceive, "meshtastic.receive") # pub.subscribe(onConnection, "meshtastic.connection") # We now call onConnected from main # pub.subscribe(onConnected, "meshtastic.connection.established") # pub.subscribe(onNode, "meshtastic.node") def export_config(interface): """used in--export-config""" owner = interface.getLongName() channel_url = interface.localNode.getURL() myinfo = interface.getMyNodeInfo() pos = myinfo.get('position') lat = None lon = None alt = None if pos: lat = pos.get('latitude') lon = pos.get('longitude') alt = pos.get('altitude') config = "# start of Meshtastic configure yaml\n" if owner: config += f"owner: {owner}\n\n" if channel_url: config += f"channel_url: {channel_url}\n\n" if lat or lon or alt: config += "location:\n" if lat: config += f" lat: {lat}\n" if lon: config += f" lon: {lon}\n" if alt: config += f" alt: {alt}\n" config += "\n" preferences = f'{interface.localNode.radioConfig.preferences}' prefs = preferences.splitlines() if prefs: config += "user_prefs:\n" for pref in prefs: config += f" {meshtastic.util.quoteBooleans(pref)}\n" print(config) return config def 
common():
    """Shared code for all of our command line wrappers.

    Reads the parsed args from Globals, configures logging, opens the
    appropriate interface (BLE/TCP/serial) and dispatches to onConnected().
    """
    our_globals = Globals.getInstance()
    args = our_globals.get_args()
    parser = our_globals.get_parser()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO,
                        format='%(levelname)s file:%(filename)s %(funcName)s line:%(lineno)s %(message)s')
    if len(sys.argv) == 1:
        # No arguments at all: show help and exit
        parser.print_help(sys.stderr)
        meshtastic.util.our_exit("", 1)
    else:
        if args.support:
            meshtastic.util.support_info()
            meshtastic.util.our_exit("", 0)
        if args.ch_index is not None:
            channelIndex = int(args.ch_index)
            our_globals.set_channel_index(channelIndex)
        # Some commands require dest to be set, so we now use destOrAll/destOrLocal for more lenient commands
        if not args.dest:
            args.destOrAll = "^all"
            args.destOrLocal = "^local"
        else:
            args.destOrAll = args.dest
            args.destOrLocal = args.dest
        # FIXME, temp hack for debugging remove
        if not args.seriallog:
            if args.noproto:
                args.seriallog = "stdout"
            else:
                args.seriallog = "none"  # assume no debug output in this case
        if args.deprecated is not None:
            logging.error(
                'This option has been deprecated, see help below for the correct replacement...')
            parser.print_help(sys.stderr)
            meshtastic.util.our_exit('', 1)
        elif args.test:
            result = meshtastic.test.testAll()
            if not result:
                meshtastic.util.our_exit("Warning: Test was not successful.")
            else:
                meshtastic.util.our_exit("Test was a success.", 0)
        else:
            if args.seriallog == "stdout":
                logfile = sys.stdout
            elif args.seriallog == "none":
                args.seriallog = None
                logging.debug("Not logging serial output")
                logfile = None
            else:
                logging.info(f"Logging serial output to {args.seriallog}")
                # Note: using "line buffering"
                # pylint: disable=R1732
                logfile = open(args.seriallog, 'w+', buffering=1, encoding='utf8')
            subscribe()
            # Pick the transport: BLE, TCP, or (default) serial
            if args.ble:
                client = meshtastic.ble_interface.BLEInterface(args.ble, debugOut=logfile, noProto=args.noproto)
            elif args.host:
                client = meshtastic.tcp_interface.TCPInterface(
                    args.host, debugOut=logfile, noProto=args.noproto)
            else:
                client = meshtastic.serial_interface.SerialInterface(
                    args.port, debugOut=logfile, noProto=args.noproto)

            # We assume client is fully connected now
            onConnected(client)
            if args.noproto or (have_tunnel and args.tunnel):
                # loop until someone presses ctrlc
                while True:
                    time.sleep(1000)
        # don't call exit, background threads might be running still
        # sys.exit(0)


def initParser():
    """Initialize the command line argument parsing."""
    our_globals = Globals.getInstance()
    parser = our_globals.get_parser()
    # NOTE(review): this early get_args() value is never used before being
    # replaced by parser.parse_args() at the end of this function.
    args = our_globals.get_args()

    parser.add_argument(
        "--configure",
        help="Specify a path to a yaml(.yml) file containing the desired settings for the connected device.",
        action='append')
    parser.add_argument(
        "--export-config",
        help="Export the configuration in yaml(.yml) format.",
        action='store_true')
    parser.add_argument(
        "--port",
        help="The port the Meshtastic device is connected to, i.e. /dev/ttyUSB0. If unspecified, we'll try to find it.",
        default=None)
    parser.add_argument(
        "--host",
        help="The hostname/ipaddr of the device to connect to (over TCP)",
        default=None)
    parser.add_argument(
        "--seriallog",
        help="Log device serial output to either 'stdout', 'none' or a filename to append to.")
    parser.add_argument("--info", help="Read and display the radio config information",
                        action="store_true")
    parser.add_argument("--nodes", help="Print Node List in a pretty formatted table",
                        action="store_true")
    parser.add_argument("--qr", help="Display the QR code that corresponds to the current channel",
                        action="store_true")
    parser.add_argument(
        "--get",
        help="Get a preferences field. Use an invalid field such as '0' to get a list of all fields.",
        nargs=1, action='append')
    parser.add_argument(
        "--set", help="Set a preferences field", nargs=2, action='append')
    parser.add_argument(
        "--seturl", help="Set a channel URL", action="store")
    parser.add_argument(
        "--ch-index", help="Set the specified channel index. Channels start at 0 (0 is the PRIMARY channel).",
        action="store")
    parser.add_argument(
        "--ch-add", help="Add a secondary channel, you must specify a channel name", default=None)
    parser.add_argument(
        "--ch-del", help="Delete the ch-index channel", action='store_true')
    parser.add_argument(
        "--ch-enable", help="Enable the specified channel",
        action="store_true", dest="ch_enable", default=False)
    # Note: We are doing a double negative here (Do we want to disable? If ch_disable==True, then disable.)
    parser.add_argument(
        "--ch-disable", help="Disable the specified channel",
        action="store_true", dest="ch_disable", default=False)
    parser.add_argument(
        "--ch-set", help="Set a channel parameter", nargs=2, action='append')
    parser.add_argument(
        "--ch-longslow", help="Change to the long-range and slow channel", action='store_true')
    parser.add_argument(
        "--ch-longfast", help="Change to the long-range and fast channel", action='store_true')
    parser.add_argument(
        "--ch-mediumslow", help="Change to the medium-range and slow channel", action='store_true')
    parser.add_argument(
        "--ch-mediumfast", help="Change to the medium-range and fast channel", action='store_true')
    parser.add_argument(
        "--ch-shortslow", help="Change to the short-range and slow channel", action='store_true')
    parser.add_argument(
        "--ch-shortfast", help="Change to the short-range and fast channel", action='store_true')
    parser.add_argument(
        "--set-owner", help="Set device owner name", action="store")
    parser.add_argument(
        "--set-team", help="Set team affiliation (an invalid team will list valid values)", action="store")
    parser.add_argument(
        "--set-ham", help="Set licensed Ham ID and turn off encryption", action="store")
    parser.add_argument(
        "--dest",
        help="The destination node id for any sent commands, if not set '^all' or '^local' is assumed as appropriate",
        default=None)
    parser.add_argument(
        "--sendtext", help="Send a text message. Can specify a destination '--dest' and/or channel index '--ch-index'.")
    parser.add_argument(
        "--sendping", help="Send a ping message (which requests a reply)", action="store_true")
    parser.add_argument(
        "--reboot", help="Tell the destination node to reboot", action="store_true")
    parser.add_argument(
        "--reply", help="Reply to received messages", action="store_true")
    parser.add_argument(
        "--gpio-wrb", nargs=2, help="Set a particular GPIO # to 1 or 0", action='append')
    parser.add_argument(
        "--gpio-rd", help="Read from a GPIO mask (ex: '0x10')")
    parser.add_argument(
        "--gpio-watch", help="Start watching a GPIO mask for changes (ex: '0x10')")
    parser.add_argument(
        "--no-time", help="Suppress sending the current time to the mesh", action="store_true")
    parser.add_argument(
        "--setalt", help="Set device altitude (allows use without GPS)")
    parser.add_argument(
        "--setlat", help="Set device latitude (allows use without GPS)")
    parser.add_argument(
        "--setlon", help="Set device longitude (allows use without GPS)")
    parser.add_argument(
        "--pos-fields",
        help="Specify fields to send when sending a position. Use no argument for a list of valid values. 
"\ "Can pass multiple values as a space separated list like "\ "this: '--pos-fields POS_ALTITUDE POS_ALT_MSL'", nargs="*", action="store") parser.add_argument("--debug", help="Show API library debug log messages", action="store_true") parser.add_argument("--test", help="Run stress test against all connected Meshtastic devices", action="store_true") parser.add_argument("--ble", help="BLE mac address to connect to (BLE is not yet supported for this tool)", default=None) parser.add_argument("--noproto", help="Don't start the API, just function as a dumb serial terminal.", action="store_true") parser.add_argument('--setchan', dest='deprecated', nargs=2, action='append', help='Deprecated, use "--ch-set param value" instead') parser.add_argument('--set-router', dest='deprecated', action='store_true', help='Deprecated, use "--set is_router true" instead') parser.add_argument('--unset-router', dest='deprecated', action='store_false', help='Deprecated, use "--set is_router false" instead') if have_tunnel: parser.add_argument('--tunnel', action='store_true', help="Create a TUN tunnel device for forwarding IP packets over the mesh") parser.add_argument( "--subnet", dest='tunnel_net', help="Sets the local-end subnet address for the TUN IP bridge", default=None) parser.set_defaults(deprecated=None) parser.add_argument('--version', action='version', version=f"{pkg_resources.require("meshtastic")[0].version}") parser.add_argument( "--support", action='store_true', help="Show support info (useful when troubleshooting an issue)") args = parser.parse_args() our_globals.set_args(args) our_globals.set_parser(parser) def main(): """Perform command line meshtastic operations""" our_globals = Globals.getInstance() parser = argparse.ArgumentParser() our_globals.set_parser(parser) initParser() common() def tunnelMain(): """Run a meshtastic IP tunnel""" our_globals = Globals.getInstance() parser = argparse.ArgumentParser() our_globals.set_parser(parser) initParser() args = 
our_globals.get_args() args.tunnel = True our_globals.set_args(args) common() if __name__ == "__main__": main()
#!python3 """ Main Meshtastic """ import argparse import platform import logging import sys import time import yaml from pubsub import pub import pyqrcode import pkg_resources import meshtastic.util import meshtastic.test from . import remote_hardware from . import portnums_pb2, channel_pb2, radioconfig_pb2 from .globals import Globals have_tunnel = platform.system() == 'Linux' """We only import the tunnel code if we are on a platform that can run it. """ def onReceive(packet, interface): """Callback invoked when a packet arrives""" our_globals = Globals.getInstance() args = our_globals.get_args() try: d = packet.get('decoded') logging.debug(f'in onReceive() d:{d}') # Exit once we receive a reply if args and args.sendtext and packet["to"] == interface.myInfo.my_node_num and d["portnum"] == portnums_pb2.PortNum.TEXT_MESSAGE_APP: interface.close() # after running command then exit # Reply to every received message with some stats if args and args.reply: msg = d.get('text') if msg: rxSnr = packet['rxSnr'] hopLimit = packet['hopLimit'] print(f"message: {msg}") reply = "got msg \'{}\' with rxSnr: {} and hopLimit: {}".format(msg, rxSnr, hopLimit) print("Sending reply: ", reply) interface.sendText(reply) except Exception as ex: print(ex) def onConnection(interface, topic=pub.AUTO_TOPIC): """Callback invoked when we connect/disconnect from a radio""" print(f"Connection changed: {topic.getName()}") def getPref(attributes, name): """Get a channel or preferences value""" objDesc = attributes.DESCRIPTOR field = objDesc.fields_by_name.get(name) if not field: print(f"{attributes.__class__.__name__} does not have an attribute called {name}, so you can not get it.") print(f"Choices in sorted order are:") names = [] for f in objDesc.fields: names.append(f'{f.name}') for temp_name in sorted(names): print(f" {temp_name}") return # okay - try to read the value try: try: val = getattr(attributes, name) except TypeError: # The getter didn't like our arg type guess try again as a string 
val = getattr(attributes, name) # succeeded! print(f"{name}: {str(val)}") except Exception as ex: print(f"Can't get {name} due to {ex}") def setPref(attributes, name, valStr): """Set a channel or preferences value""" objDesc = attributes.DESCRIPTOR field = objDesc.fields_by_name.get(name) if not field: print(f"{attributes.__class__.__name__} does not have an attribute called {name}, so you can not set it.") print(f"Choices in sorted order are:") names = [] for f in objDesc.fields: names.append(f'{f.name}') for temp_name in sorted(names): print(f" {temp_name}") return val = meshtastic.util.fromStr(valStr) enumType = field.enum_type # pylint: disable=C0123 if enumType and type(val) == str: # We've failed so far to convert this string into an enum, try to find it by reflection e = enumType.values_by_name.get(val) if e: val = e.number else: print(f"{name} does not have an enum called {val}, so you can not set it.") print(f"Choices in sorted order are:") names = [] for f in enumType.values: names.append(f'{f.name}') for temp_name in sorted(names): print(f" {temp_name}") return # okay - try to read the value try: try: setattr(attributes, name, val) except TypeError: # The setter didn't like our arg type guess try again as a string setattr(attributes, name, valStr) # succeeded! print(f"Set {name} to {valStr}") except Exception as ex: print(f"Can't set {name} due to {ex}") def onConnected(interface): """Callback invoked when we connect to a radio""" closeNow = False # Should we drop the connection after we finish? 
    try:
        our_globals = Globals.getInstance()
        args = our_globals.get_args()
        print("Connected to radio")

        def getNode():
            """This operation could be expensive, so we try to cache the results"""
            targetNode = our_globals.get_target_node()
            if not targetNode:
                targetNode = interface.getNode(args.destOrLocal)
                our_globals.set_target_node(targetNode)
            return targetNode

        # --setlat/--setlon/--setalt: pin a fixed position on the device
        if args.setlat or args.setlon or args.setalt:
            closeNow = True
            alt = 0
            lat = 0.0
            lon = 0.0
            prefs = interface.localNode.radioConfig.preferences
            if args.setalt:
                alt = int(args.setalt)
                prefs.fixed_position = True
                print(f"Fixing altitude at {alt} meters")
            if args.setlat:
                lat = float(args.setlat)
                prefs.fixed_position = True
                print(f"Fixing latitude at {lat} degrees")
            if args.setlon:
                lon = float(args.setlon)
                prefs.fixed_position = True
                print(f"Fixing longitude at {lon} degrees")
            print("Setting device position")
            # can include lat/long/alt etc: latitude = 37.5, longitude = -122.1
            interface.sendPosition(lat, lon, alt)
            interface.localNode.writeConfig()
        elif not args.no_time:
            # We normally provide a current time to the mesh when we connect
            interface.sendPosition()

        if args.set_owner:
            closeNow = True
            print(f"Setting device owner to {args.set_owner}")
            getNode().setOwner(args.set_owner)

        if args.pos_fields:
            # If --pos-fields invoked with args, set position fields
            closeNow = True
            prefs = getNode().radioConfig.preferences
            allFields = 0
            try:
                # OR all requested flag names into one bitmask
                for field in args.pos_fields:
                    v_field = radioconfig_pb2.PositionFlags.Value(field)
                    allFields |= v_field
            except ValueError:
                print("ERROR: supported position fields are:")
                print(radioconfig_pb2.PositionFlags.keys())
                print("If no fields are specified, will read and display current value.")
            else:
                print(f"Setting position fields to {allFields}")
                setPref(prefs, 'position_flags', ('%d' % allFields))
                print("Writing modified preferences to device")
                getNode().writeConfig()
        elif args.pos_fields is not None:
            # If --pos-fields invoked without args, read and display current value
            closeNow = True
            prefs = getNode().radioConfig.preferences
            fieldNames = []
            for bit in radioconfig_pb2.PositionFlags.values():
                if prefs.position_flags & bit:
                    fieldNames.append(radioconfig_pb2.PositionFlags.Name(bit))
            print(' '.join(fieldNames))

        if args.set_team:
            closeNow = True
            try:
                v_team = meshtastic.mesh_pb2.Team.Value(args.set_team.upper())
            except ValueError:
                v_team = 0
                print(f"ERROR: Team \'{args.set_team}\' not found.")
                print("Try a team name from the sorted list below, or use 'CLEAR' for unaffiliated:")
                print(sorted(meshtastic.mesh_pb2.Team.keys()))
            else:
                print(f"Setting team to {meshtastic.mesh_pb2.Team.Name(v_team)}")
                getNode().setOwner(team=v_team)

        if args.set_ham:
            closeNow = True
            print(f"Setting Ham ID to {args.set_ham} and turning off encryption")
            getNode().setOwner(args.set_ham, is_licensed=True)
            # Must turn off encryption on primary channel
            getNode().turnOffEncryptionOnPrimaryChannel()

        if args.reboot:
            closeNow = True
            getNode().reboot()

        if args.sendtext:
            closeNow = True
            channelIndex = 0
            if args.ch_index is not None:
                channelIndex = int(args.ch_index)
            ch = getNode().getChannelByChannelIndex(channelIndex)
            if ch and ch.role != channel_pb2.Channel.Role.DISABLED:
                print(f"Sending text message {args.sendtext} to {args.destOrAll} on channelIndex:{channelIndex}")
                interface.sendText(args.sendtext, args.destOrAll, wantAck=True, channelIndex=channelIndex)
            else:
                meshtastic.util.our_exit(f"Warning: {channelIndex} is not a valid channel. Channel must not be DISABLED.")

        if args.sendping:
            payload = str.encode("test string")
            print(f"Sending ping message to {args.destOrAll}")
            interface.sendData(payload, args.destOrAll, portNum=portnums_pb2.PortNum.REPLY_APP,
                               wantAck=True, wantResponse=True)

        if args.gpio_wrb or args.gpio_rd or args.gpio_watch:
            rhc = remote_hardware.RemoteHardwareClient(interface)

            if args.gpio_wrb:
                bitmask = 0
                bitval = 0
                for wrpair in (args.gpio_wrb or []):
                    bitmask |= 1 << int(wrpair[0])
                    bitval |= int(wrpair[1]) << int(wrpair[0])
                print(f"Writing GPIO mask 0x{bitmask:x} with value 0x{bitval:x} to {args.dest}")
                rhc.writeGPIOs(args.dest, bitmask, bitval)
                closeNow = True

            if args.gpio_rd:
                bitmask = int(args.gpio_rd, 16)
                print(f"Reading GPIO mask 0x{bitmask:x} from {args.dest}")
                interface.mask = bitmask
                rhc.readGPIOs(args.dest, bitmask, None)
                if not interface.noProto:
                    # wait up to X seconds for a response
                    for _ in range(10):
                        time.sleep(1)
                        if interface.gotResponse:
                            break
                logging.debug(f'end of gpio_rd')

            if args.gpio_watch:
                bitmask = int(args.gpio_watch, 16)
                print(f"Watching GPIO mask 0x{bitmask:x} from {args.dest}. Press ctrl-c to exit")
                # intentional infinite loop; user exits with ctrl-c
                while True:
                    rhc.watchGPIOs(args.dest, bitmask)
                    time.sleep(1)

        # handle settings
        if args.set:
            closeNow = True
            prefs = getNode().radioConfig.preferences
            # Handle the int/float/bool arguments
            for pref in args.set:
                setPref(prefs, pref[0], pref[1])
            print("Writing modified preferences to device")
            getNode().writeConfig()

        if args.configure:
            with open(args.configure[0], encoding='utf8') as file:
                configuration = yaml.safe_load(file)
                closeNow = True

                if 'owner' in configuration:
                    print(f"Setting device owner to {configuration['owner']}")
                    getNode().setOwner(configuration['owner'])

                if 'channel_url' in configuration:
                    print("Setting channel url to", configuration['channel_url'])
                    getNode().setURL(configuration['channel_url'])

                if 'location' in configuration:
                    alt = 0
                    lat = 0.0
                    lon = 0.0
                    prefs = interface.localNode.radioConfig.preferences

                    if 'alt' in configuration['location']:
                        alt = int(configuration['location']['alt'])
                        prefs.fixed_position = True
                        print(f"Fixing altitude at {alt} meters")
                    if 'lat' in configuration['location']:
                        lat = float(configuration['location']['lat'])
                        prefs.fixed_position = True
                        print(f"Fixing latitude at {lat} degrees")
                    if 'lon' in configuration['location']:
                        lon = float(configuration['location']['lon'])
                        prefs.fixed_position = True
                        print(f"Fixing longitude at {lon} degrees")
                    print("Setting device position")
                    interface.sendPosition(lat, lon, alt)
                    interface.localNode.writeConfig()

                if 'user_prefs' in configuration:
                    prefs = getNode().radioConfig.preferences
                    for pref in configuration['user_prefs']:
                        setPref(prefs, pref, str(configuration['user_prefs'][pref]))
                    print("Writing modified preferences to device")
                    getNode().writeConfig()

        if args.export_config:
            # export the configuration (the opposite of '--configure')
            closeNow = True
            export_config(interface)

        if args.seturl:
            closeNow = True
            getNode().setURL(args.seturl)

        # handle changing channels
        if args.ch_add:
            closeNow = True
            if len(args.ch_add) > 10:
                meshtastic.util.our_exit("Warning: Channel name must be shorter. Channel not added.")
            n = getNode()
            ch = n.getChannelByName(args.ch_add)
            if ch:
                meshtastic.util.our_exit(f"Warning: This node already has a '{args.ch_add}' channel. No changes were made.")
            else:
                # get the first channel that is disabled (i.e., available)
                ch = n.getDisabledChannel()
                if not ch:
                    meshtastic.util.our_exit("Warning: No free channels were found")
                chs = channel_pb2.ChannelSettings()
                chs.psk = meshtastic.util.genPSK256()
                chs.name = args.ch_add
                ch.settings.CopyFrom(chs)
                ch.role = channel_pb2.Channel.Role.SECONDARY
                print(f"Writing modified channels to device")
                n.writeChannel(ch.index)

        if args.ch_del:
            closeNow = True
            channelIndex = our_globals.get_channel_index()
            if channelIndex is None:
                meshtastic.util.our_exit("Warning: Need to specify '--ch-index' for '--ch-del'.", 1)
            else:
                if channelIndex == 0:
                    meshtastic.util.our_exit("Warning: Cannot delete primary channel.", 1)
                else:
                    print(f"Deleting channel {channelIndex}")
                    ch = getNode().deleteChannel(channelIndex)

        # any of the "simple" channel shortcuts (range presets) imply the PRIMARY channel
        ch_changes = [args.ch_longslow, args.ch_longfast, args.ch_mediumslow,
                      args.ch_mediumfast, args.ch_shortslow, args.ch_shortfast]
        any_primary_channel_changes = any(x for x in ch_changes)
        if args.ch_set or any_primary_channel_changes or args.ch_enable or args.ch_disable:
            closeNow = True
            channelIndex = our_globals.get_channel_index()
            if channelIndex is None:
                if any_primary_channel_changes:
                    # we assume that they want the primary channel if they're setting range values
                    channelIndex = 0
                else:
                    meshtastic.util.our_exit("Warning: Need to specify '--ch-index'.", 1)
            ch = getNode().channels[channelIndex]

            if any_primary_channel_changes or args.ch_enable or args.ch_disable:
                if channelIndex == 0 and not any_primary_channel_changes:
                    meshtastic.util.our_exit("Warning: Cannot enable/disable PRIMARY channel.")
                if channelIndex != 0:
                    if any_primary_channel_changes:
                        meshtastic.util.our_exit("Warning: Standard channel settings can only be applied to the PRIMARY channel")
                enable = True  # default to enable
                if args.ch_enable:
                    enable = True
                if args.ch_disable:
                    enable = False

            def setSimpleChannel(modem_config):
                """Set one of the simple modem_config only based channels"""
                # Completely new channel settings
                chs = channel_pb2.ChannelSettings()
                chs.modem_config = modem_config
                chs.psk = bytes([1])  # Use default channel psk 1
                # 'ch' is the channel object selected above in onConnected
                ch.settings.CopyFrom(chs)

            # handle the simple channel set commands
            if args.ch_longslow:
                setSimpleChannel(channel_pb2.ChannelSettings.ModemConfig.Bw125Cr48Sf4096)
            if args.ch_longfast:
                setSimpleChannel(channel_pb2.ChannelSettings.ModemConfig.Bw31_25Cr48Sf512)
            if args.ch_mediumslow:
                setSimpleChannel(channel_pb2.ChannelSettings.ModemConfig.Bw250Cr46Sf2048)
            if args.ch_mediumfast:
                setSimpleChannel(channel_pb2.ChannelSettings.ModemConfig.Bw250Cr47Sf1024)
            if args.ch_shortslow:
                setSimpleChannel(channel_pb2.ChannelSettings.ModemConfig.Bw125Cr45Sf128)
            if args.ch_shortfast:
                setSimpleChannel(channel_pb2.ChannelSettings.ModemConfig.Bw500Cr45Sf128)

            # Handle the channel settings
            for pref in (args.ch_set or []):
                if pref[0] == "psk":
                    # PSKs are decoded specially, not via the generic setPref path
                    ch.settings.psk = meshtastic.util.fromPSK(pref[1])
                else:
                    setPref(ch.settings, pref[0], pref[1])
                enable = True  # If we set any pref, assume the user wants to enable the channel

            if enable:
                ch.role = channel_pb2.Channel.Role.PRIMARY if (
                    channelIndex == 0) else channel_pb2.Channel.Role.SECONDARY
            else:
                ch.role = channel_pb2.Channel.Role.DISABLED

            print(f"Writing modified channels to device")
            getNode().writeChannel(channelIndex)

        if args.info:
            print("")
            # If we aren't trying to talk to our local node, don't show it
            if not args.dest:
                interface.showInfo()
                print("")
            getNode().showInfo()
            closeNow = True  # FIXME, for now we leave the link up while talking to remote nodes
            print("")

        if args.get:
            closeNow = True
            prefs = getNode().radioConfig.preferences
            # Handle the int/float/bool arguments
            for pref in args.get:
                getPref(prefs, pref[0])
            print("Completed getting preferences")

        if args.nodes:
            closeNow = True
            interface.showNodes()

        if args.qr:
            closeNow = True
            url = interface.localNode.getURL(includeAll=False)
            print(f"Primary channel URL {url}")
            qr = pyqrcode.create(url)
            print(qr.terminal())

        if have_tunnel and args.tunnel:
            # pylint: disable=C0415
            from . import tunnel
            # Even if others said we could close, stay open if the user asked for a tunnel
            closeNow = False
            tunnel.Tunnel(interface, subnet=args.tunnel_net)

        # if the user didn't ask for serial debugging output, we might want to exit after we've done our operation
        if (not args.seriallog) and closeNow:
            interface.close()  # after running command then exit

    except Exception as ex:
        # Broad catch: exit with a message rather than a traceback
        print(f"Aborting due to: {ex}")
        interface.close()  # close the connection now, so that our app exits


def onNode(node):
    """Callback invoked when the node DB changes"""
    print(f"Node changed: {node}")


def subscribe():
    """Subscribe to the topics the user probably wants to see, prints output to stdout"""
    pub.subscribe(onReceive, "meshtastic.receive")
    # pub.subscribe(onConnection, "meshtastic.connection")
    # We now call onConnected from main
    # pub.subscribe(onConnected, "meshtastic.connection.established")
    # pub.subscribe(onNode, "meshtastic.node")


def export_config(interface):
    """Export the connected device's configuration as a YAML string (used by --export-config).

    Prints the YAML to stdout and also returns it.
    """
    owner = interface.getLongName()
    channel_url = interface.localNode.getURL()
    myinfo = interface.getMyNodeInfo()
    pos = myinfo.get('position')
    lat = None
    lon = None
    alt = None
    if pos:
        lat = pos.get('latitude')
        lon = pos.get('longitude')
        alt = pos.get('altitude')

    config = "# start of Meshtastic configure yaml\n"
    if owner:
        config += f"owner: {owner}\n\n"
    if channel_url:
        config += f"channel_url: {channel_url}\n\n"
    if lat or lon or alt:
        config += "location:\n"
        if lat:
            config += f" lat: {lat}\n"
        if lon:
            config += f" lon: {lon}\n"
        if alt:
            config += f" alt: {alt}\n"
        config += "\n"
    # Re-emit the text form of the preferences protobuf under user_prefs:
    preferences = f'{interface.localNode.radioConfig.preferences}'
    prefs = preferences.splitlines()
    if prefs:
        config += "user_prefs:\n"
        for pref in prefs:
            config += f" {meshtastic.util.quoteBooleans(pref)}\n"
    print(config)
    return config


def
common():
    """Shared code for all of our command line wrappers.

    Configures logging, opens the BLE/TCP/serial interface, then hands off
    to onConnected().
    """
    our_globals = Globals.getInstance()
    args = our_globals.get_args()
    parser = our_globals.get_parser()
    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO,
                        format='%(levelname)s file:%(filename)s %(funcName)s line:%(lineno)s %(message)s')
    if len(sys.argv) == 1:
        # Invoked with no arguments: print help and exit
        parser.print_help(sys.stderr)
        meshtastic.util.our_exit("", 1)
    else:
        if args.support:
            meshtastic.util.support_info()
            meshtastic.util.our_exit("", 0)
        if args.ch_index is not None:
            channelIndex = int(args.ch_index)
            our_globals.set_channel_index(channelIndex)
        # Some commands require dest to be set, so we now use destOrAll/destOrLocal for more lenient commands
        if not args.dest:
            args.destOrAll = "^all"
            args.destOrLocal = "^local"
        else:
            args.destOrAll = args.dest
            args.destOrLocal = args.dest
        # FIXME, temp hack for debugging remove
        if not args.seriallog:
            if args.noproto:
                args.seriallog = "stdout"
            else:
                args.seriallog = "none"  # assume no debug output in this case
        if args.deprecated is not None:
            logging.error(
                'This option has been deprecated, see help below for the correct replacement...')
            parser.print_help(sys.stderr)
            meshtastic.util.our_exit('', 1)
        elif args.test:
            result = meshtastic.test.testAll()
            if not result:
                meshtastic.util.our_exit("Warning: Test was not successful.")
            else:
                meshtastic.util.our_exit("Test was a success.", 0)
        else:
            if args.seriallog == "stdout":
                logfile = sys.stdout
            elif args.seriallog == "none":
                args.seriallog = None
                logging.debug("Not logging serial output")
                logfile = None
            else:
                logging.info(f"Logging serial output to {args.seriallog}")
                # Note: using "line buffering"
                # pylint: disable=R1732
                logfile = open(args.seriallog, 'w+', buffering=1, encoding='utf8')
            subscribe()
            # Choose the transport: BLE, TCP, or (default) serial
            if args.ble:
                client = meshtastic.ble_interface.BLEInterface(args.ble, debugOut=logfile, noProto=args.noproto)
            elif args.host:
                client = meshtastic.tcp_interface.TCPInterface(
                    args.host, debugOut=logfile, noProto=args.noproto)
            else:
                client = meshtastic.serial_interface.SerialInterface(
                    args.port, debugOut=logfile, noProto=args.noproto)

            # We assume client is fully connected now
            onConnected(client)
            if args.noproto or (have_tunnel and args.tunnel):
                # loop until someone presses ctrlc
                while True:
                    time.sleep(1000)
        # don't call exit, background threads might be running still
        # sys.exit(0)


def initParser():
    """Initialize the command line argument parsing."""
    our_globals = Globals.getInstance()
    parser = our_globals.get_parser()
    # NOTE(review): this value is unused; args is rebound by parse_args() below.
    args = our_globals.get_args()

    parser.add_argument(
        "--configure",
        help="Specify a path to a yaml(.yml) file containing the desired settings for the connected device.",
        action='append')
    parser.add_argument(
        "--export-config",
        help="Export the configuration in yaml(.yml) format.",
        action='store_true')
    parser.add_argument(
        "--port",
        help="The port the Meshtastic device is connected to, i.e. /dev/ttyUSB0. If unspecified, we'll try to find it.",
        default=None)
    parser.add_argument(
        "--host",
        help="The hostname/ipaddr of the device to connect to (over TCP)",
        default=None)
    parser.add_argument(
        "--seriallog",
        help="Log device serial output to either 'stdout', 'none' or a filename to append to.")
    parser.add_argument("--info", help="Read and display the radio config information",
                        action="store_true")
    parser.add_argument("--nodes", help="Print Node List in a pretty formatted table",
                        action="store_true")
    parser.add_argument("--qr", help="Display the QR code that corresponds to the current channel",
                        action="store_true")
    parser.add_argument(
        "--get",
        help="Get a preferences field. Use an invalid field such as '0' to get a list of all fields.",
        nargs=1, action='append')
    parser.add_argument(
        "--set", help="Set a preferences field", nargs=2, action='append')
    parser.add_argument(
        "--seturl", help="Set a channel URL", action="store")
    parser.add_argument(
        "--ch-index", help="Set the specified channel index. Channels start at 0 (0 is the PRIMARY channel).",
        action="store")
    parser.add_argument(
        "--ch-add", help="Add a secondary channel, you must specify a channel name", default=None)
    parser.add_argument(
        "--ch-del", help="Delete the ch-index channel", action='store_true')
    parser.add_argument(
        "--ch-enable", help="Enable the specified channel",
        action="store_true", dest="ch_enable", default=False)
    # Note: We are doing a double negative here (Do we want to disable? If ch_disable==True, then disable.)
    parser.add_argument(
        "--ch-disable", help="Disable the specified channel",
        action="store_true", dest="ch_disable", default=False)
    parser.add_argument(
        "--ch-set", help="Set a channel parameter", nargs=2, action='append')
    parser.add_argument(
        "--ch-longslow", help="Change to the long-range and slow channel", action='store_true')
    parser.add_argument(
        "--ch-longfast", help="Change to the long-range and fast channel", action='store_true')
    parser.add_argument(
        "--ch-mediumslow", help="Change to the medium-range and slow channel", action='store_true')
    parser.add_argument(
        "--ch-mediumfast", help="Change to the medium-range and fast channel", action='store_true')
    parser.add_argument(
        "--ch-shortslow", help="Change to the short-range and slow channel", action='store_true')
    parser.add_argument(
        "--ch-shortfast", help="Change to the short-range and fast channel", action='store_true')
    parser.add_argument(
        "--set-owner", help="Set device owner name", action="store")
    parser.add_argument(
        "--set-team", help="Set team affiliation (an invalid team will list valid values)", action="store")
    parser.add_argument(
        "--set-ham", help="Set licensed Ham ID and turn off encryption", action="store")
    parser.add_argument(
        "--dest",
        help="The destination node id for any sent commands, if not set '^all' or '^local' is assumed as appropriate",
        default=None)
    parser.add_argument(
        "--sendtext", help="Send a text message. Can specify a destination '--dest' and/or channel index '--ch-index'.")
    parser.add_argument(
        "--sendping", help="Send a ping message (which requests a reply)", action="store_true")
    parser.add_argument(
        "--reboot", help="Tell the destination node to reboot", action="store_true")
    parser.add_argument(
        "--reply", help="Reply to received messages", action="store_true")
    parser.add_argument(
        "--gpio-wrb", nargs=2, help="Set a particular GPIO # to 1 or 0", action='append')
    parser.add_argument(
        "--gpio-rd", help="Read from a GPIO mask (ex: '0x10')")
    parser.add_argument(
        "--gpio-watch", help="Start watching a GPIO mask for changes (ex: '0x10')")
    parser.add_argument(
        "--no-time", help="Suppress sending the current time to the mesh", action="store_true")
    parser.add_argument(
        "--setalt", help="Set device altitude (allows use without GPS)")
    parser.add_argument(
        "--setlat", help="Set device latitude (allows use without GPS)")
    parser.add_argument(
        "--setlon", help="Set device longitude (allows use without GPS)")
    parser.add_argument(
        "--pos-fields",
        help="Specify fields to send when sending a position. Use no argument for a list of valid values. 
"\ "Can pass multiple values as a space separated list like "\ "this: '--pos-fields POS_ALTITUDE POS_ALT_MSL'", nargs="*", action="store") parser.add_argument("--debug", help="Show API library debug log messages", action="store_true") parser.add_argument("--test", help="Run stress test against all connected Meshtastic devices", action="store_true") parser.add_argument("--ble", help="BLE mac address to connect to (BLE is not yet supported for this tool)", default=None) parser.add_argument("--noproto", help="Don't start the API, just function as a dumb serial terminal.", action="store_true") parser.add_argument('--setchan', dest='deprecated', nargs=2, action='append', help='Deprecated, use "--ch-set param value" instead') parser.add_argument('--set-router', dest='deprecated', action='store_true', help='Deprecated, use "--set is_router true" instead') parser.add_argument('--unset-router', dest='deprecated', action='store_false', help='Deprecated, use "--set is_router false" instead') if have_tunnel: parser.add_argument('--tunnel', action='store_true', help="Create a TUN tunnel device for forwarding IP packets over the mesh") parser.add_argument( "--subnet", dest='tunnel_net', help="Sets the local-end subnet address for the TUN IP bridge", default=None) parser.set_defaults(deprecated=None) parser.add_argument('--version', action='version', version=f"{pkg_resources.require('meshtastic')[0].version}") parser.add_argument( "--support", action='store_true', help="Show support info (useful when troubleshooting an issue)") args = parser.parse_args() our_globals.set_args(args) our_globals.set_parser(parser) def main(): """Perform command line meshtastic operations""" our_globals = Globals.getInstance() parser = argparse.ArgumentParser() our_globals.set_parser(parser) initParser() common() def tunnelMain(): """Run a meshtastic IP tunnel""" our_globals = Globals.getInstance() parser = argparse.ArgumentParser() our_globals.set_parser(parser) initParser() args = 
our_globals.get_args() args.tunnel = True our_globals.set_args(args) common() if __name__ == "__main__": main()
# -*- coding: utf-8 -*- ''' Module to interact with Junos devices. ''' from __future__ import absolute_import # Import python libraries import logging import json import os try: from lxml import etree except ImportError: from salt._compat import ElementTree as etree # Juniper interface libraries # https://github.com/Juniper/py-junos-eznc try: # pylint: disable=W0611 from jnpr.junos import Device from jnpr.junos.utils.sw import SW from jnpr.junos.utils.scp import SCP import jnpr.junos.utils import jnpr.junos.cfg import jxmlease # pylint: enable=W0611 HAS_JUNOS = True except ImportError: HAS_JUNOS = False # Import salt libraries from salt.utils import fopen from salt.utils import files from salt.utils import safe_rm # Set up logging log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'junos' __proxyenabled__ = ['junos'] def __virtual__(): ''' We need the Junos adapter libraries for this module to work. We also need a proxymodule entry in __opts__ in the opts dictionary ''' if HAS_JUNOS and 'proxy' in __opts__: return __virtualname__ else: return (False, 'The junos module could not be loaded: ' 'junos-eznc or jxmlease or proxy could not be loaded.') def facts_refresh(): ''' Reload the facts dictionary from the device. Usually only needed if, the device configuration is changed by some other actor. This function will also refresh the facts stored in the salt grains. Usage: .. code-block:: bash salt 'device_name' junos.facts_refresh ''' conn = __proxy__['junos.conn']() ret = dict() ret['out'] = True try: conn.facts_refresh() except Exception as exception: ret['message'] = 'Execution failed due to "{0}"'.format(exception) ret['out'] = False return ret ret['facts'] = __proxy__['junos.get_serialized_facts']() try: __salt__['saltutil.sync_grains']() except Exception as exception: log.error('Grains could not be updated due to "{0}"'.format(exception)) return ret def facts(): ''' Displays the facts gathered during the connection. 
These facts are also stored in Salt grains. Usage: .. code-block:: bash salt 'device_name' junos.facts ''' ret = dict() try: ret['facts'] = __proxy__['junos.get_serialized_facts']() ret['out'] = True except Exception as exception: ret['message'] = 'Could not display facts due to "{0}"'.format( exception) ret['out'] = False return ret def rpc(cmd=None, dest=None, format='xml', **kwargs): ''' This function executes the rpc provided as arguments on the junos device. The returned data can be stored in a file. Usage: .. code-block:: bash salt 'device' junos.rpc 'get_config' '/var/log/config.txt' 'text' filter='<configuration><system/></configuration>' salt 'device' junos.rpc 'get-interface-information' '/home/user/interface.xml' interface_name='lo0' terse=True salt 'device' junos.rpc 'get-chassis-inventory' Parameters: Required * cmd: The rpc to be executed. (default = None) Optional * dest: Destination file where the rpc ouput is stored. (default = None) Note that the file will be stored on the proxy minion. To push the files to the master use the salt's following execution module: :py:func:`cp.push <salt.modules.cp.push>` * format: The format in which the rpc reply is received from the device. (default = xml) * kwargs: keyworded arguments taken by rpc call like- * dev_timeout: Set NETCONF RPC timeout. Can be used for commands which take a while to execute. (default= 30 seconds) * filter: Only to be used with 'get-config' rpc to get specific configuration. * terse: Amount of information you want. * interface_name: Name of the interface whose information you want. ''' conn = __proxy__['junos.conn']() ret = dict() ret['out'] = True if cmd is None: ret['message'] = 'Please provide the rpc to execute.' 
ret['out'] = False return ret op = dict() if '__pub_arg' in kwargs: if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) else: op.update(kwargs) op['dev_timeout'] = str(op.pop('timeout', conn.timeout)) if cmd in ['get-config', 'get_config']: filter_reply = None if 'filter' in op: filter_reply = etree.XML(op['filter']) del op['filter'] op.update({'format': format}) try: reply = getattr( conn.rpc, cmd.replace('-', '_'))(filter_reply, options=op) except Exception as exception: ret['message'] = 'RPC execution failed due to "{0}"'.format( exception) ret['out'] = False return ret else: op['dev_timeout'] = int(op['dev_timeout']) if 'filter' in op: log.warning( 'Filter ignored as it is only used with "get-config" rpc') try: reply = getattr( conn.rpc, cmd.replace('-', '_'))({'format': format}, **op) except Exception as exception: ret['message'] = 'RPC execution failed due to "{0}"'.format( exception) ret['out'] = False return ret if format == 'text': # Earlier it was ret['message'] ret['rpc_reply'] = reply.text elif format == 'json': # Earlier it was ret['message'] ret['rpc_reply'] = reply else: # Earlier it was ret['message'] ret['rpc_reply'] = jxmlease.parse(etree.tostring(reply)) if dest: if format == 'text': write_response = reply.text elif format == 'json': write_response = json.dumps(reply, indent=1) else: write_response = etree.tostring(reply) with fopen(dest, 'w') as fp: fp.write(write_response) return ret def set_hostname(hostname=None, **kwargs): ''' To set the name of the device. Usage: .. code-block:: bash salt 'device_name' junos.set_hostname salt-device Parameters: Required * hostname: The name to be set. (default = None) Optional * kwargs: Keyworded arguments which can be provided like- * dev_timeout: Set NETCONF RPC timeout. Can be used for commands which take a while to execute. (default = 30 seconds) * comment: Provide a comment to the commit. 
(default = None) * confirm: Provide time in minutes for commit confirmation. \ If this option is specified, the commit will be rollbacked in \ the given time unless the commit is confirmed. ''' conn = __proxy__['junos.conn']() ret = dict() if hostname is None: ret['message'] = 'Please provide the hostname.' ret['out'] = False return ret op = dict() if '__pub_arg' in kwargs: if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) else: op.update(kwargs) # Added to recent versions of JunOs # Use text format instead set_string = 'set system host-name {0}'.format(hostname) try: conn.cu.load(set_string, format='set') except Exception as exception: ret['message'] = 'Could not load configuration due to error "{0}"'.format( exception) ret['out'] = False return ret try: commit_ok = conn.cu.commit_check() except Exception as exception: ret['message'] = 'Could not commit check due to error "{0}"'.format( exception) ret['out'] = False return ret if commit_ok: try: conn.cu.commit(**op) ret['message'] = 'Successfully changed hostname.' ret['out'] = True except Exception as exception: ret['out'] = False ret['message'] = 'Successfully loaded host-name but commit failed with "{0}"'.format( exception) return ret else: ret['out'] = False ret[ 'message'] = 'Successfully loaded host-name but pre-commit check failed.' conn.cu.rollback() return ret def commit(**kwargs): ''' To commit the changes loaded in the candidate configuration. Usage: .. code-block:: bash salt 'device_name' junos.commit comment='Commiting via saltstack' detail=True salt 'device_name' junos.commit dev_timeout=60 confirm=10 salt 'device_name' junos.commit sync=True dev_timeout=90 Parameters: Optional * kwargs: Keyworded arguments which can be provided like- * dev_timeout: Set NETCONF RPC timeout. Can be used for commands which take a \ while to execute. (default = 30 seconds) * comment: Provide a comment to the commit. 
(default = None) * confirm: Provide time in minutes for commit confirmation. If this option \ is specified, the commit will be rollbacked in the given time \ unless the commit is confirmed. * sync: On dual control plane systems, requests that the candidate\ configuration on one control plane be copied to the other \ control plane,checked for correct syntax, and committed on \ both Routing Engines. (default = False) * force_sync: On dual control plane systems, force the candidate configuration on one control plane to be copied to the other control plane. * full: When set to True requires all the daemons to check and evaluate \ the new configuration. * detail: When true return commit detail. ''' conn = __proxy__['junos.conn']() ret = {} op = dict() if '__pub_arg' in kwargs: if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) else: op.update(kwargs) op['detail'] = op.get('detail', False) try: commit_ok = conn.cu.commit_check() except Exception as exception: ret['message'] = 'Could not perform commit check due to "{0}"'.format( exception) ret['out'] = False return ret if commit_ok: try: commit = conn.cu.commit(**op) ret['out'] = True if commit: if op['detail']: ret['message'] = jxmlease.parse(etree.tostring(commit)) else: ret['message'] = 'Commit Successful.' else: ret['message'] = 'Commit failed.' ret['out'] = False except Exception as exception: ret['out'] = False ret['message'] = \ 'Commit check succeeded but actual commit failed with "{0}"' \ .format(exception) else: ret['out'] = False ret['message'] = 'Pre-commit check failed.' conn.cu.rollback() return ret def rollback(id=0, **kwargs): ''' To rollback the last committed configuration changes Usage: .. code-block:: bash salt 'device_name' junos.rollback 10 Parameters: Optional * id: The rollback id value [0-49]. (default = 0) * kwargs: Keyworded arguments which can be provided like- * dev_timeout: Set NETCONF RPC timeout. 
Can be used for commands which take a while to execute. (default = 30 seconds) * comment: Provide a comment to the commit. (default = None) * confirm: Provide time in minutes for commit confirmation. If this option \ is specified, the commit will be rollbacked in the given time \ unless the commit is confirmed. * diffs_file: Path to the file where any diffs will be written. (default = None) ''' ret = dict() conn = __proxy__['junos.conn']() op = dict() if '__pub_arg' in kwargs: if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) else: op.update(kwargs) try: ret['out'] = conn.cu.rollback(id) except Exception as exception: ret['message'] = 'Rollback failed due to "{0}"'.format(exception) ret['out'] = False return ret if ret['out']: ret['message'] = 'Rollback successful' else: ret['message'] = 'Rollback failed' return ret if 'diffs_file' in op and op['diffs_file'] is not None: diff = conn.cu.diff() if diff is not None: with fopen(op['diffs_file'], 'w') as fp: fp.write(diff) else: log.info( 'No diff between current configuration and \ rollbacked configuration, so no diff file created') try: commit_ok = conn.cu.commit_check() except Exception as exception: ret['message'] = 'Could not commit check due to "{0}"'.format( exception) ret['out'] = False return ret if commit_ok: try: conn.cu.commit(**op) ret['out'] = True except Exception as exception: ret['out'] = False ret['message'] = \ 'Rollback successful but commit failed with error "{0}"'\ .format(exception) return ret else: ret['message'] = 'Rollback succesfull but pre-commit check failed.' ret['out'] = False return ret def diff(id=0): ''' Gives the difference between the candidate and the current configuration. Usage: .. code-block:: bash salt 'device_name' junos.diff 3 Parameters: Optional * id: The rollback id value [0-49]. 
(default = 0) ''' conn = __proxy__['junos.conn']() ret = dict() ret['out'] = True try: ret['message'] = conn.cu.diff(rb_id=id) except Exception as exception: ret['message'] = 'Could not get diff with error "{0}"'.format( exception) ret['out'] = False return ret def ping(dest_ip=None, **kwargs): ''' To send ping RPC to a device. Usage: .. code-block:: bash salt 'device_name' junos.ping '8.8.8.8' count=5 salt 'device_name' junos.ping '8.8.8.8' ttl=1 rapid=True Parameters: Required * dest_ip: The IP which is to be pinged. (default = None) Optional * kwargs: Keyworded arguments which can be provided like- * dev_timeout: Set NETCONF RPC timeout. Can be used for commands which take a while to execute. (default = 30 seconds) * rapid: Setting this to True executes ping at 100pps instead of 1pps. \ (default = False) * ttl: Maximum number of IP routers (IP hops) allowed between source \ and destination. * routing_instance: Name of the routing instance to use to send the ping. * interface: Interface used to send traffic out. * count: Number of packets to send. (default = 5) ''' conn = __proxy__['junos.conn']() ret = dict() if dest_ip is None: ret['message'] = 'Please specify the destination ip to ping.' ret['out'] = False return ret op = {'host': dest_ip} if '__pub_arg' in kwargs: if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) else: op.update(kwargs) op['count'] = str(op.pop('count', 5)) if 'ttl' in op: op['ttl'] = str(op['ttl']) ret['out'] = True try: ret['message'] = jxmlease.parse(etree.tostring(conn.rpc.ping(**op))) except Exception as exception: ret['message'] = 'Execution failed due to "{0}"'.format(exception) ret['out'] = False return ret def cli(command=None, format='text', **kwargs): ''' Executes the CLI commands and returns the output in specified format. \ (default is text) The ouput can also be stored in a file. Usage: .. 
code-block:: bash salt 'device_name' junos.cli 'show system commit' salt 'device_name' junos.cli 'show version' dev_timeout=40 salt 'device_name' junos.cli 'show system alarms' 'xml' dest=/home/user/cli_output.txt Parameters: Required * command: The command that need to be executed on Junos CLI. (default = None) Optional * format: Format in which to get the CLI output. (text or xml, \ default = 'text') * kwargs: Keyworded arguments which can be provided like- * dev_timeout: Set NETCONF RPC timeout. Can be used for commands which take a while to execute. (default = 30 seconds) * dest: The destination file where the CLI output can be stored.\ (default = None) ''' conn = __proxy__['junos.conn']() # Cases like salt 'device_name' junos.cli 'show system alarms' '' # In this case the format becomes '' (empty string). And reply is sent in xml # We want the format to default to text. if not format: format = 'text' ret = dict() if command is None: ret['message'] = 'Please provide the CLI command to be executed.' ret['out'] = False return ret op = dict() if '__pub_arg' in kwargs: if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) else: op.update(kwargs) try: result = conn.cli(command, format, warning=False) except Exception as exception: ret['message'] = 'Execution failed due to "{0}"'.format(exception) ret['out'] = False return ret if format == 'text': ret['message'] = result else: result = etree.tostring(result) ret['message'] = jxmlease.parse(result) if 'dest' in op and op['dest'] is not None: with fopen(op['dest'], 'w') as fp: fp.write(result) ret['out'] = True return ret def shutdown(**kwargs): ''' Shut down (power off) or reboot a device running Junos OS. This includes all Routing Engines in a Virtual Chassis or a dual Routing \ Engine system. Usage: .. 
code-block:: bash salt 'device_name' junos.shutdown reboot=True salt 'device_name' junos.shutdown shutdown=True in_min=10 salt 'device_name' junos.shutdown shutdown=True Parameters: Optional * kwargs: * shutdown: Set this to true if you want to shutdown the machine. (default=False, this is a safety mechanism so that the user does not accidentally shutdown the junos device.) * reboot: Whether to reboot instead of shutdown. (default=False) Note that either one of the above arguments has to be specified (shutdown or reboot) for this function to work. * at: Date and time the reboot should take place. The string must match the junos cli reboot syntax (To be used only if reboot=True) * in_min: Specify delay in minutes for shutdown ''' conn = __proxy__['junos.conn']() ret = dict() sw = SW(conn) op = dict() if '__pub_arg' in kwargs: if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) else: op.update(kwargs) if 'shutdown' not in op and 'reboot' not in op: ret['message'] = \ 'Provide either one of the arguments: shutdown or reboot.' ret['out'] = False return ret try: if 'reboot' in op and op['reboot']: shut = sw.reboot elif 'shutdown' in op and op['shutdown']: shut = sw.poweroff else: ret['message'] = 'Nothing to be done.' ret['out'] = False return ret if 'in_min' in op: shut(in_min=op['in_min']) elif 'at' in op: shut(at=op['at']) else: shut() ret['message'] = 'Successfully powered off/rebooted.' ret['out'] = True except Exception as exception: ret['message'] = \ 'Could not poweroff/reboot beacause "{0}"'.format(exception) ret['out'] = False return ret def install_config(path=None, **kwargs): ''' Installs the given configuration file into the candidate configuration. Commits the changes if the commit checks or throws an error. Usage: .. 
code-block:: bash salt 'device_name' junos.install_config 'salt://production/network/routers/config.set' salt 'device_name' junos.install_config 'salt://templates/replace_config.conf' replace=True comment='Committed via SaltStack' salt 'device_name' junos.install_config 'salt://my_new_configuration.conf' dev_timeout=300 diffs_file='/salt/confs/old_config.conf' overwrite=True salt 'device_name' junos.install_config 'salt://syslog_template.conf' template_vars='{'syslog_host': '10.180.222.7'}' Parameters: Required * path: Path where the configuration/template file is present. If the file has a \ '*.conf' extension, the content is treated as text format. If the file has a '*.xml' \ extension, the content is treated as XML format. If the file has a '*.set' \ extension, the content is treated as Junos OS 'set' commands.(default = None) Optional * kwargs: Keyworded arguments which can be provided like- * dev_timeout: Set NETCONF RPC timeout. Can be used for commands which take a while to execute. (default = 30 seconds) * overwrite: Set to True if you want this file is to completely replace the\ configuration file. (default = False) * replace: Specify whether the configuration file uses "replace:" statements. Those statements under the 'replace' tag will only be changed.\ (default = False) * comment: Provide a comment to the commit. (default = None) * confirm: Provide time in minutes for commit confirmation. If this option is specified, the commit will be rollbacked in \ the given time unless the commit is confirmed. * diffs_file: Path to the file where the diff (difference in old configuration and the committed configuration) will be stored.(default = None) Note that the file will be stored on the proxy minion. To push the files to the master use the salt's following execution module: \ :py:func:`cp.push <salt.modules.cp.push>` * template_vars: Variables to be passed into the template processing engine in addition to those present in __pillar__, __opts__, __grains__, etc. 
You may reference these variables in your template like so: {{ template_vars["var_name"] }} ''' conn = __proxy__['junos.conn']() ret = dict() ret['out'] = True if path is None: ret['message'] = \ 'Please provide the salt path where the configuration is present' ret['out'] = False return ret op = dict() if '__pub_arg' in kwargs: if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) else: op.update(kwargs) template_vars = dict() if "template_vars" in op: template_vars = op["template_vars"] template_cached_path = files.mkstemp() __salt__['cp.get_template']( path, template_cached_path, template_vars=template_vars) if not os.path.isfile(template_cached_path): ret['message'] = 'Invalid file path.' ret['out'] = False return ret if os.path.getsize(template_cached_path) == 0: ret['message'] = 'Template failed to render' ret['out'] = False return ret write_diff = '' if 'diffs_file' in op and op['diffs_file'] is not None: write_diff = op['diffs_file'] del op['diffs_file'] op['path'] = template_cached_path if 'format' not in op: if path.endswith('set'): template_format = 'set' elif path.endswith('xml'): template_format = 'xml' else: template_format = 'text' op['format'] = template_format if 'replace' in op and op['replace']: op['merge'] = False del op['replace'] elif 'overwrite' in op and op['overwrite']: op['overwrite'] = True elif 'overwrite' in op and not op['overwrite']: op['merge'] = True del op['overwrite'] try: conn.cu.load(**op) except Exception as exception: ret['message'] = 'Could not load configuration due to : "{0}"'.format( exception) ret['format'] = template_format ret['out'] = False return ret finally: safe_rm(template_cached_path) config_diff = conn.cu.diff() if config_diff is None: ret['message'] = 'Configuration already applied!' 
ret['out'] = True return ret commit_params = {} if 'confirm' in op: commit_params['confirm'] = op['confirm'] if 'comment' in op: commit_params['comment'] = op['comment'] try: check = conn.cu.commit_check() except Exception as exception: ret['message'] = \ 'Commit check threw the following exception: "{0}"'\ .format(exception) ret['out'] = False return ret if check: try: conn.cu.commit(**commit_params) ret['message'] = 'Successfully loaded and committed!' except Exception as exception: ret['message'] = \ 'Commit check successful but commit failed with "{0}"'\ .format(exception) ret['out'] = False return ret else: ret['message'] = 'Loaded configuration but commit check failed.' ret['out'] = False conn.cu.rollback() try: if write_diff and config_diff is not None: with fopen(write_diff, 'w') as fp: fp.write(config_diff) except Exception as exception: ret['message'] = 'Could not write into diffs_file due to: "{0}"'.format( exception) ret['out'] = False return ret def zeroize(): ''' Resets the device to default factory settings Usage: .. code-block:: bash salt 'device_name' junos.zeroize ''' conn = __proxy__['junos.conn']() ret = dict() ret['out'] = True try: conn.cli('request system zeroize') ret['message'] = 'Completed zeroize and rebooted' except Exception as exception: ret['message'] = 'Could not zeroize due to : "{0}"'.format(exception) ret['out'] = False return ret def install_os(path=None, **kwargs): ''' Installs the given image on the device. After the installation is complete\ the device is rebooted, if reboot=True is given as a keyworded argument. Usage: .. code-block:: bash salt 'device_name' junos.install_os 'salt://images/junos_image.tgz' reboot=True salt 'device_name' junos.install_os 'salt://junos_16_1.tgz' dev_timeout=300 Parameters: Required * path: Path where the image file is present on the proxy minion. Optional * kwargs: keyworded arguments to be given such as dev_timeout, reboot etc * dev_timeout: Set NETCONF RPC timeout. 
Can be used to RPCs which take a while to execute. (default = 30 seconds) * reboot: Whether to reboot after installation (default = False) * no_copy: When True the software package will not be SCP’d to the device. \ (default = False) ''' conn = __proxy__['junos.conn']() ret = dict() ret['out'] = True if path is None: ret['message'] = \ 'Please provide the salt path where the junos image is present.' ret['out'] = False return ret image_cached_path = files.mkstemp() __salt__['cp.get_template'](path, image_cached_path) if not os.path.isfile(image_cached_path): ret['message'] = 'Invalid image path.' ret['out'] = False return ret if os.path.getsize(image_cached_path) == 0: ret['message'] = 'Failed to copy image' ret['out'] = False return ret path = image_cached_path op = dict() if '__pub_arg' in kwargs: if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) else: op.update(kwargs) try: conn.sw.install(path, progress=True) ret['message'] = 'Installed the os.' except Exception as exception: ret['message'] = 'Installation failed due to: "{0}"'.format(exception) ret['out'] = False return ret finally: safe_rm(image_cached_path) if 'reboot' in op and op['reboot'] is True: try: conn.sw.reboot() except Exception as exception: ret['message'] = \ 'Installation successful but reboot failed due to : "{0}"' \ .format(exception) ret['out'] = False return ret ret['message'] = 'Successfully installed and rebooted!' return ret def file_copy(src=None, dest=None): ''' Copies the file from the local device to the junos device. Usage: .. code-block:: bash salt 'device_name' junos.file_copy /home/m2/info.txt info_copy.txt Parameters: Required * src: The sorce path where the file is kept. * dest: The destination path where the file will be copied. ''' conn = __proxy__['junos.conn']() ret = dict() ret['out'] = True if src is None: ret['message'] = \ 'Please provide the absolute path of the file to be copied.' 
ret['out'] = False return ret if not os.path.isfile(src): ret['message'] = 'Invalid source file path' ret['out'] = False return ret if dest is None: ret['message'] = \ 'Please provide the absolute path of the destination where the file is to be copied.' ret['out'] = False return ret try: with SCP(conn, progress=True) as scp: scp.put(src, dest) ret['message'] = 'Successfully copied file from {0} to {1}'.format( src, dest) except Exception as exception: ret['message'] = 'Could not copy file : "{0}"'.format(exception) ret['out'] = False return ret
# -*- coding: utf-8 -*- ''' Module to interact with Junos devices. ''' from __future__ import absolute_import # Import python libraries import logging import json import os try: from lxml import etree except ImportError: from salt._compat import ElementTree as etree # Juniper interface libraries # https://github.com/Juniper/py-junos-eznc try: # pylint: disable=W0611 from jnpr.junos import Device from jnpr.junos.utils.sw import SW from jnpr.junos.utils.scp import SCP import jnpr.junos.utils import jnpr.junos.cfg import jxmlease # pylint: enable=W0611 HAS_JUNOS = True except ImportError: HAS_JUNOS = False # Import salt libraries from salt.utils import fopen from salt.utils import files from salt.utils import safe_rm # Set up logging log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'junos' __proxyenabled__ = ['junos'] def __virtual__(): ''' We need the Junos adapter libraries for this module to work. We also need a proxymodule entry in __opts__ in the opts dictionary ''' if HAS_JUNOS and 'proxy' in __opts__: return __virtualname__ else: return (False, 'The junos module could not be loaded: ' 'junos-eznc or jxmlease or proxy could not be loaded.') def facts_refresh(): ''' Reload the facts dictionary from the device. Usually only needed if, the device configuration is changed by some other actor. This function will also refresh the facts stored in the salt grains. Usage: .. code-block:: bash salt 'device_name' junos.facts_refresh ''' conn = __proxy__['junos.conn']() ret = dict() ret['out'] = True try: conn.facts_refresh() except Exception as exception: ret['message'] = 'Execution failed due to "{0}"'.format(exception) ret['out'] = False return ret ret['facts'] = __proxy__['junos.get_serialized_facts']() try: __salt__['saltutil.sync_grains']() except Exception as exception: log.error('Grains could not be updated due to "{0}"'.format(exception)) return ret def facts(): ''' Displays the facts gathered during the connection. 
These facts are also stored in Salt grains. Usage: .. code-block:: bash salt 'device_name' junos.facts ''' ret = dict() try: ret['facts'] = __proxy__['junos.get_serialized_facts']() ret['out'] = True except Exception as exception: ret['message'] = 'Could not display facts due to "{0}"'.format( exception) ret['out'] = False return ret def rpc(cmd=None, dest=None, format='xml', **kwargs): ''' This function executes the rpc provided as arguments on the junos device. The returned data can be stored in a file. Usage: .. code-block:: bash salt 'device' junos.rpc 'get_config' '/var/log/config.txt' 'text' filter='<configuration><system/></configuration>' salt 'device' junos.rpc 'get-interface-information' '/home/user/interface.xml' interface_name='lo0' terse=True salt 'device' junos.rpc 'get-chassis-inventory' Parameters: Required * cmd: The rpc to be executed. (default = None) Optional * dest: Destination file where the rpc ouput is stored. (default = None) Note that the file will be stored on the proxy minion. To push the files to the master use the salt's following execution module: :py:func:`cp.push <salt.modules.cp.push>` * format: The format in which the rpc reply is received from the device. (default = xml) * kwargs: keyworded arguments taken by rpc call like- * dev_timeout: Set NETCONF RPC timeout. Can be used for commands which take a while to execute. (default= 30 seconds) * filter: Only to be used with 'get-config' rpc to get specific configuration. * terse: Amount of information you want. * interface_name: Name of the interface whose information you want. ''' conn = __proxy__['junos.conn']() ret = dict() ret['out'] = True if cmd is None: ret['message'] = 'Please provide the rpc to execute.' 
ret['out'] = False return ret op = dict() if '__pub_arg' in kwargs: if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) else: op.update(kwargs) op['dev_timeout'] = str(op.pop('timeout', conn.timeout)) if cmd in ['get-config', 'get_config']: filter_reply = None if 'filter' in op: filter_reply = etree.XML(op['filter']) del op['filter'] op.update({'format': format}) try: reply = getattr( conn.rpc, cmd.replace('-', '_'))(filter_reply, options=op) except Exception as exception: ret['message'] = 'RPC execution failed due to "{0}"'.format( exception) ret['out'] = False return ret else: op['dev_timeout'] = int(op['dev_timeout']) if 'filter' in op: log.warning( 'Filter ignored as it is only used with "get-config" rpc') try: reply = getattr( conn.rpc, cmd.replace('-', '_'))({'format': format}, **op) except Exception as exception: ret['message'] = 'RPC execution failed due to "{0}"'.format( exception) ret['out'] = False return ret if format == 'text': # Earlier it was ret['message'] ret['rpc_reply'] = reply.text elif format == 'json': # Earlier it was ret['message'] ret['rpc_reply'] = reply else: # Earlier it was ret['message'] ret['rpc_reply'] = jxmlease.parse(etree.tostring(reply)) if dest: if format == 'text': write_response = reply.text elif format == 'json': write_response = json.dumps(reply, indent=1) else: write_response = etree.tostring(reply) with fopen(dest, 'w') as fp: fp.write(write_response) return ret def set_hostname(hostname=None, **kwargs): ''' To set the name of the device. Usage: .. code-block:: bash salt 'device_name' junos.set_hostname salt-device Parameters: Required * hostname: The name to be set. (default = None) Optional * kwargs: Keyworded arguments which can be provided like- * dev_timeout: Set NETCONF RPC timeout. Can be used for commands which take a while to execute. (default = 30 seconds) * comment: Provide a comment to the commit. 
(default = None) * confirm: Provide time in minutes for commit confirmation. \ If this option is specified, the commit will be rollbacked in \ the given time unless the commit is confirmed. ''' conn = __proxy__['junos.conn']() ret = dict() if hostname is None: ret['message'] = 'Please provide the hostname.' ret['out'] = False return ret op = dict() if '__pub_arg' in kwargs: if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) else: op.update(kwargs) # Added to recent versions of JunOs # Use text format instead set_string = 'set system host-name {0}'.format(hostname) try: conn.cu.load(set_string, format='set') except Exception as exception: ret['message'] = 'Could not load configuration due to error "{0}"'.format( exception) ret['out'] = False return ret try: commit_ok = conn.cu.commit_check() except Exception as exception: ret['message'] = 'Could not commit check due to error "{0}"'.format( exception) ret['out'] = False return ret if commit_ok: try: conn.cu.commit(**op) ret['message'] = 'Successfully changed hostname.' ret['out'] = True except Exception as exception: ret['out'] = False ret['message'] = 'Successfully loaded host-name but commit failed with "{0}"'.format( exception) return ret else: ret['out'] = False ret[ 'message'] = 'Successfully loaded host-name but pre-commit check failed.' conn.cu.rollback() return ret def commit(**kwargs): ''' To commit the changes loaded in the candidate configuration. Usage: .. code-block:: bash salt 'device_name' junos.commit comment='Commiting via saltstack' detail=True salt 'device_name' junos.commit dev_timeout=60 confirm=10 salt 'device_name' junos.commit sync=True dev_timeout=90 Parameters: Optional * kwargs: Keyworded arguments which can be provided like- * dev_timeout: Set NETCONF RPC timeout. Can be used for commands which take a \ while to execute. (default = 30 seconds) * comment: Provide a comment to the commit. 
(default = None) * confirm: Provide time in minutes for commit confirmation. If this option \ is specified, the commit will be rollbacked in the given time \ unless the commit is confirmed. * sync: On dual control plane systems, requests that the candidate\ configuration on one control plane be copied to the other \ control plane,checked for correct syntax, and committed on \ both Routing Engines. (default = False) * force_sync: On dual control plane systems, force the candidate configuration on one control plane to be copied to the other control plane. * full: When set to True requires all the daemons to check and evaluate \ the new configuration. * detail: When true return commit detail. ''' conn = __proxy__['junos.conn']() ret = {} op = dict() if '__pub_arg' in kwargs: if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) else: op.update(kwargs) op['detail'] = op.get('detail', False) try: commit_ok = conn.cu.commit_check() except Exception as exception: ret['message'] = 'Could not perform commit check due to "{0}"'.format( exception) ret['out'] = False return ret if commit_ok: try: commit = conn.cu.commit(**op) ret['out'] = True if commit: if op['detail']: ret['message'] = jxmlease.parse(etree.tostring(commit)) else: ret['message'] = 'Commit Successful.' else: ret['message'] = 'Commit failed.' ret['out'] = False except Exception as exception: ret['out'] = False ret['message'] = \ 'Commit check succeeded but actual commit failed with "{0}"' \ .format(exception) else: ret['out'] = False ret['message'] = 'Pre-commit check failed.' conn.cu.rollback() return ret def rollback(id=0, **kwargs): ''' To rollback the last committed configuration changes Usage: .. code-block:: bash salt 'device_name' junos.rollback 10 Parameters: Optional * id: The rollback id value [0-49]. (default = 0) * kwargs: Keyworded arguments which can be provided like- * dev_timeout: Set NETCONF RPC timeout. 
Can be used for commands which take a while to execute. (default = 30 seconds) * comment: Provide a comment to the commit. (default = None) * confirm: Provide time in minutes for commit confirmation. If this option \ is specified, the commit will be rollbacked in the given time \ unless the commit is confirmed. * diffs_file: Path to the file where any diffs will be written. (default = None) ''' ret = dict() conn = __proxy__['junos.conn']() op = dict() if '__pub_arg' in kwargs: if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) else: op.update(kwargs) try: ret['out'] = conn.cu.rollback(id) except Exception as exception: ret['message'] = 'Rollback failed due to "{0}"'.format(exception) ret['out'] = False return ret if ret['out']: ret['message'] = 'Rollback successful' else: ret['message'] = 'Rollback failed' return ret if 'diffs_file' in op and op['diffs_file'] is not None: diff = conn.cu.diff() if diff is not None: with fopen(op['diffs_file'], 'w') as fp: fp.write(diff) else: log.info( 'No diff between current configuration and \ rollbacked configuration, so no diff file created') try: commit_ok = conn.cu.commit_check() except Exception as exception: ret['message'] = 'Could not commit check due to "{0}"'.format( exception) ret['out'] = False return ret if commit_ok: try: conn.cu.commit(**op) ret['out'] = True except Exception as exception: ret['out'] = False ret['message'] = \ 'Rollback successful but commit failed with error "{0}"'\ .format(exception) return ret else: ret['message'] = 'Rollback succesfull but pre-commit check failed.' ret['out'] = False return ret def diff(id=0): ''' Gives the difference between the candidate and the current configuration. Usage: .. code-block:: bash salt 'device_name' junos.diff 3 Parameters: Optional * id: The rollback id value [0-49]. 
(default = 0) ''' conn = __proxy__['junos.conn']() ret = dict() ret['out'] = True try: ret['message'] = conn.cu.diff(rb_id=id) except Exception as exception: ret['message'] = 'Could not get diff with error "{0}"'.format( exception) ret['out'] = False return ret def ping(dest_ip=None, **kwargs): ''' To send ping RPC to a device. Usage: .. code-block:: bash salt 'device_name' junos.ping '8.8.8.8' count=5 salt 'device_name' junos.ping '8.8.8.8' ttl=1 rapid=True Parameters: Required * dest_ip: The IP which is to be pinged. (default = None) Optional * kwargs: Keyworded arguments which can be provided like- * dev_timeout: Set NETCONF RPC timeout. Can be used for commands which take a while to execute. (default = 30 seconds) * rapid: Setting this to True executes ping at 100pps instead of 1pps. \ (default = False) * ttl: Maximum number of IP routers (IP hops) allowed between source \ and destination. * routing_instance: Name of the routing instance to use to send the ping. * interface: Interface used to send traffic out. * count: Number of packets to send. (default = 5) ''' conn = __proxy__['junos.conn']() ret = dict() if dest_ip is None: ret['message'] = 'Please specify the destination ip to ping.' ret['out'] = False return ret op = {'host': dest_ip} if '__pub_arg' in kwargs: if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) else: op.update(kwargs) op['count'] = str(op.pop('count', 5)) if 'ttl' in op: op['ttl'] = str(op['ttl']) ret['out'] = True try: ret['message'] = jxmlease.parse(etree.tostring(conn.rpc.ping(**op))) except Exception as exception: ret['message'] = 'Execution failed due to "{0}"'.format(exception) ret['out'] = False return ret def cli(command=None, format='text', **kwargs): ''' Executes the CLI commands and returns the output in specified format. \ (default is text) The ouput can also be stored in a file. Usage: .. 
code-block:: bash salt 'device_name' junos.cli 'show system commit' salt 'device_name' junos.cli 'show version' dev_timeout=40 salt 'device_name' junos.cli 'show system alarms' 'xml' dest=/home/user/cli_output.txt Parameters: Required * command: The command that need to be executed on Junos CLI. (default = None) Optional * format: Format in which to get the CLI output. (text or xml, \ default = 'text') * kwargs: Keyworded arguments which can be provided like- * dev_timeout: Set NETCONF RPC timeout. Can be used for commands which take a while to execute. (default = 30 seconds) * dest: The destination file where the CLI output can be stored.\ (default = None) ''' conn = __proxy__['junos.conn']() # Cases like salt 'device_name' junos.cli 'show system alarms' '' # In this case the format becomes '' (empty string). And reply is sent in xml # We want the format to default to text. if not format: format = 'text' ret = dict() if command is None: ret['message'] = 'Please provide the CLI command to be executed.' ret['out'] = False return ret op = dict() if '__pub_arg' in kwargs: if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) else: op.update(kwargs) try: result = conn.cli(command, format, warning=False) except Exception as exception: ret['message'] = 'Execution failed due to "{0}"'.format(exception) ret['out'] = False return ret if format == 'text': ret['message'] = result else: result = etree.tostring(result) ret['message'] = jxmlease.parse(result) if 'dest' in op and op['dest'] is not None: with fopen(op['dest'], 'w') as fp: fp.write(result) ret['out'] = True return ret def shutdown(**kwargs): ''' Shut down (power off) or reboot a device running Junos OS. This includes all Routing Engines in a Virtual Chassis or a dual Routing \ Engine system. Usage: .. 
code-block:: bash salt 'device_name' junos.shutdown reboot=True salt 'device_name' junos.shutdown shutdown=True in_min=10 salt 'device_name' junos.shutdown shutdown=True Parameters: Optional * kwargs: * shutdown: Set this to true if you want to shutdown the machine. (default=False, this is a safety mechanism so that the user does not accidentally shutdown the junos device.) * reboot: Whether to reboot instead of shutdown. (default=False) Note that either one of the above arguments has to be specified (shutdown or reboot) for this function to work. * at: Date and time the reboot should take place. The string must match the junos cli reboot syntax (To be used only if reboot=True) * in_min: Specify delay in minutes for shutdown ''' conn = __proxy__['junos.conn']() ret = dict() sw = SW(conn) op = dict() if '__pub_arg' in kwargs: if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) else: op.update(kwargs) if 'shutdown' not in op and 'reboot' not in op: ret['message'] = \ 'Provide either one of the arguments: shutdown or reboot.' ret['out'] = False return ret try: if 'reboot' in op and op['reboot']: shut = sw.reboot elif 'shutdown' in op and op['shutdown']: shut = sw.poweroff else: ret['message'] = 'Nothing to be done.' ret['out'] = False return ret if 'in_min' in op: shut(in_min=op['in_min']) elif 'at' in op: shut(at=op['at']) else: shut() ret['message'] = 'Successfully powered off/rebooted.' ret['out'] = True except Exception as exception: ret['message'] = \ 'Could not poweroff/reboot beacause "{0}"'.format(exception) ret['out'] = False return ret def install_config(path=None, **kwargs): ''' Installs the given configuration file into the candidate configuration. Commits the changes if the commit checks or throws an error. Usage: .. 
code-block:: bash salt 'device_name' junos.install_config 'salt://production/network/routers/config.set' salt 'device_name' junos.install_config 'salt://templates/replace_config.conf' replace=True comment='Committed via SaltStack' salt 'device_name' junos.install_config 'salt://my_new_configuration.conf' dev_timeout=300 diffs_file='/salt/confs/old_config.conf' overwrite=True salt 'device_name' junos.install_config 'salt://syslog_template.conf' template_vars='{"syslog_host": "10.180.222.7"}' Parameters: Required * path: Path where the configuration/template file is present. If the file has a \ '*.conf' extension, the content is treated as text format. If the file has a '*.xml' \ extension, the content is treated as XML format. If the file has a '*.set' \ extension, the content is treated as Junos OS 'set' commands.(default = None) Optional * kwargs: Keyworded arguments which can be provided like- * dev_timeout: Set NETCONF RPC timeout. Can be used for commands which take a while to execute. (default = 30 seconds) * overwrite: Set to True if you want this file is to completely replace the\ configuration file. (default = False) * replace: Specify whether the configuration file uses "replace:" statements. Those statements under the 'replace' tag will only be changed.\ (default = False) * comment: Provide a comment to the commit. (default = None) * confirm: Provide time in minutes for commit confirmation. If this option is specified, the commit will be rollbacked in \ the given time unless the commit is confirmed. * diffs_file: Path to the file where the diff (difference in old configuration and the committed configuration) will be stored.(default = None) Note that the file will be stored on the proxy minion. To push the files to the master use the salt's following execution module: \ :py:func:`cp.push <salt.modules.cp.push>` * template_vars: Variables to be passed into the template processing engine in addition to those present in __pillar__, __opts__, __grains__, etc. 
You may reference these variables in your template like so: {{ template_vars["var_name"] }} ''' conn = __proxy__['junos.conn']() ret = dict() ret['out'] = True if path is None: ret['message'] = \ 'Please provide the salt path where the configuration is present' ret['out'] = False return ret op = dict() if '__pub_arg' in kwargs: if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) else: op.update(kwargs) template_vars = dict() if "template_vars" in op: template_vars = op["template_vars"] template_cached_path = files.mkstemp() __salt__['cp.get_template']( path, template_cached_path, template_vars=template_vars) if not os.path.isfile(template_cached_path): ret['message'] = 'Invalid file path.' ret['out'] = False return ret if os.path.getsize(template_cached_path) == 0: ret['message'] = 'Template failed to render' ret['out'] = False return ret write_diff = '' if 'diffs_file' in op and op['diffs_file'] is not None: write_diff = op['diffs_file'] del op['diffs_file'] op['path'] = template_cached_path if 'format' not in op: if path.endswith('set'): template_format = 'set' elif path.endswith('xml'): template_format = 'xml' else: template_format = 'text' op['format'] = template_format if 'replace' in op and op['replace']: op['merge'] = False del op['replace'] elif 'overwrite' in op and op['overwrite']: op['overwrite'] = True elif 'overwrite' in op and not op['overwrite']: op['merge'] = True del op['overwrite'] try: conn.cu.load(**op) except Exception as exception: ret['message'] = 'Could not load configuration due to : "{0}"'.format( exception) ret['format'] = template_format ret['out'] = False return ret finally: safe_rm(template_cached_path) config_diff = conn.cu.diff() if config_diff is None: ret['message'] = 'Configuration already applied!' 
ret['out'] = True return ret commit_params = {} if 'confirm' in op: commit_params['confirm'] = op['confirm'] if 'comment' in op: commit_params['comment'] = op['comment'] try: check = conn.cu.commit_check() except Exception as exception: ret['message'] = \ 'Commit check threw the following exception: "{0}"'\ .format(exception) ret['out'] = False return ret if check: try: conn.cu.commit(**commit_params) ret['message'] = 'Successfully loaded and committed!' except Exception as exception: ret['message'] = \ 'Commit check successful but commit failed with "{0}"'\ .format(exception) ret['out'] = False return ret else: ret['message'] = 'Loaded configuration but commit check failed.' ret['out'] = False conn.cu.rollback() try: if write_diff and config_diff is not None: with fopen(write_diff, 'w') as fp: fp.write(config_diff) except Exception as exception: ret['message'] = 'Could not write into diffs_file due to: "{0}"'.format( exception) ret['out'] = False return ret def zeroize(): ''' Resets the device to default factory settings Usage: .. code-block:: bash salt 'device_name' junos.zeroize ''' conn = __proxy__['junos.conn']() ret = dict() ret['out'] = True try: conn.cli('request system zeroize') ret['message'] = 'Completed zeroize and rebooted' except Exception as exception: ret['message'] = 'Could not zeroize due to : "{0}"'.format(exception) ret['out'] = False return ret def install_os(path=None, **kwargs): ''' Installs the given image on the device. After the installation is complete\ the device is rebooted, if reboot=True is given as a keyworded argument. Usage: .. code-block:: bash salt 'device_name' junos.install_os 'salt://images/junos_image.tgz' reboot=True salt 'device_name' junos.install_os 'salt://junos_16_1.tgz' dev_timeout=300 Parameters: Required * path: Path where the image file is present on the proxy minion. Optional * kwargs: keyworded arguments to be given such as dev_timeout, reboot etc * dev_timeout: Set NETCONF RPC timeout. 
Can be used to RPCs which take a while to execute. (default = 30 seconds) * reboot: Whether to reboot after installation (default = False) * no_copy: When True the software package will not be SCP’d to the device. \ (default = False) ''' conn = __proxy__['junos.conn']() ret = dict() ret['out'] = True if path is None: ret['message'] = \ 'Please provide the salt path where the junos image is present.' ret['out'] = False return ret image_cached_path = files.mkstemp() __salt__['cp.get_template'](path, image_cached_path) if not os.path.isfile(image_cached_path): ret['message'] = 'Invalid image path.' ret['out'] = False return ret if os.path.getsize(image_cached_path) == 0: ret['message'] = 'Failed to copy image' ret['out'] = False return ret path = image_cached_path op = dict() if '__pub_arg' in kwargs: if kwargs['__pub_arg']: if isinstance(kwargs['__pub_arg'][-1], dict): op.update(kwargs['__pub_arg'][-1]) else: op.update(kwargs) try: conn.sw.install(path, progress=True) ret['message'] = 'Installed the os.' except Exception as exception: ret['message'] = 'Installation failed due to: "{0}"'.format(exception) ret['out'] = False return ret finally: safe_rm(image_cached_path) if 'reboot' in op and op['reboot'] is True: try: conn.sw.reboot() except Exception as exception: ret['message'] = \ 'Installation successful but reboot failed due to : "{0}"' \ .format(exception) ret['out'] = False return ret ret['message'] = 'Successfully installed and rebooted!' return ret def file_copy(src=None, dest=None): ''' Copies the file from the local device to the junos device. Usage: .. code-block:: bash salt 'device_name' junos.file_copy /home/m2/info.txt info_copy.txt Parameters: Required * src: The sorce path where the file is kept. * dest: The destination path where the file will be copied. ''' conn = __proxy__['junos.conn']() ret = dict() ret['out'] = True if src is None: ret['message'] = \ 'Please provide the absolute path of the file to be copied.' 
ret['out'] = False return ret if not os.path.isfile(src): ret['message'] = 'Invalid source file path' ret['out'] = False return ret if dest is None: ret['message'] = \ 'Please provide the absolute path of the destination where the file is to be copied.' ret['out'] = False return ret try: with SCP(conn, progress=True) as scp: scp.put(src, dest) ret['message'] = 'Successfully copied file from {0} to {1}'.format( src, dest) except Exception as exception: ret['message'] = 'Could not copy file : "{0}"'.format(exception) ret['out'] = False return ret
import tensorflow as tf

from tensorflow.python import debug as tf_debug
from collections import namedtuple

from .estimator import get_estimator
from .input import gen_input_fn
from .args import *

# Make TF be quiet
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

import logging
logger = logging.getLogger(__name__)


def train(args):
    """Train (and periodically evaluate) the estimator described by *args*.

    When args["use_curriculum"] is set, training is split into one segment
    per decode-iteration count, each segment widening the set of output
    classes; otherwise a single segment covers the whole run.
    """
    # So I don't frigging forget what caused working models
    save_args(args)

    if args["use_tf_debug"]:
        hooks = [tf_debug.LocalCLIDebugHook()]
    else:
        hooks = []

    # Count records up front so the log line below reflects the real dataset size.
    train_size = sum(1 for _ in tf.python_io.tf_record_iterator(args["train_input_path"]))
    tf.logging.info(f"Training on {train_size} records")

    # ----------------------------------------------------------------------------------

    training_segments = []
    TrainingSegment = namedtuple('TrainingSegment', ['args', 'max_steps'])

    if args["use_curriculum"]:
        assert args["train_max_steps"] is not None, "Curriculum training requires --train-max-steps"

        seg_steps = args["train_max_steps"] / float(args["max_decode_iterations"])

        for i in range(1, args["max_decode_iterations"]+1):
            seg_args = {**args}
            # Segment i trains on output classes 0..i.
            seg_args["filter_output_class"] = [str(j) for j in list(range(i+1))]
            total_seg_steps = i*seg_steps*1000
            training_segments.append(TrainingSegment(seg_args, total_seg_steps))
    else:
        training_segments.append(TrainingSegment(args,
            args["train_max_steps"]*1000 if args["train_max_steps"] is not None else None))

    for i in training_segments:
        # FIX: the subscript must use single quotes — reusing the f-string's
        # double quotes inside {} is a SyntaxError before Python 3.12 (PEP 701).
        tf.logging.info(f"Begin training segment {i.max_steps} {i.args['filter_output_class']}")

        estimator = get_estimator(i.args)

        train_spec = tf.estimator.TrainSpec(
            input_fn=gen_input_fn(i.args, "train"),
            max_steps=int(i.max_steps),
            hooks=hooks)

        eval_spec = tf.estimator.EvalSpec(
            input_fn=gen_input_fn(i.args, "eval"),
            throttle_secs=i.args["eval_every"])

        tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)


if __name__ == "__main__":
    args = get_args()

    # DO IT!
    train(args)
import tensorflow as tf from tensorflow.python import debug as tf_debug from collections import namedtuple from .estimator import get_estimator from .input import gen_input_fn from .args import * # Make TF be quiet import os os.environ["TF_CPP_MIN_LOG_LEVEL"]="2" import logging logger = logging.getLogger(__name__) def train(args): # So I don't frigging forget what caused working models save_args(args) if args["use_tf_debug"]: hooks = [tf_debug.LocalCLIDebugHook()] else: hooks = [] train_size = sum(1 for _ in tf.python_io.tf_record_iterator(args["train_input_path"])) tf.logging.info(f"Training on {train_size} records") # ---------------------------------------------------------------------------------- training_segments = [] TrainingSegment = namedtuple('TrainingSegment', ['args', 'max_steps']) if args["use_curriculum"]: assert args["train_max_steps"] is not None, "Curriculum training requires --train-max-steps" seg_steps = args["train_max_steps"] / float(args["max_decode_iterations"]) for i in range(1, args["max_decode_iterations"]+1): seg_args = {**args} seg_args["filter_output_class"] = [str(j) for j in list(range(i+1))] total_seg_steps = i*seg_steps*1000 training_segments.append(TrainingSegment(seg_args, total_seg_steps)) else: training_segments.append(TrainingSegment(args, args["train_max_steps"]*1000 if args["train_max_steps"] is not None else None)) for i in training_segments: tf.logging.info(f"Begin training segment {i.max_steps} {i.args['filter_output_class']}") estimator = get_estimator(i.args) train_spec = tf.estimator.TrainSpec( input_fn=gen_input_fn(i.args, "train"), max_steps=int(i.max_steps), hooks=hooks) eval_spec = tf.estimator.EvalSpec( input_fn=gen_input_fn(i.args, "eval"), throttle_secs=i.args["eval_every"]) tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) if __name__ == "__main__": args = get_args() # DO IT! train(args)
""" Represents a collection of trip stays and home stays. """ import csv, datetime from datetime import date, timedelta from svg_chart import SVGChart class StaysCollection: MERGE_RANGE_DAYS = 1 # Max days between trips to merge them PRINT_DATE_FORMAT = '%a %d %b %Y' def __init__(self, data_csv_path, end_date=date.today()): """ Initialize StaysCollection attributes. """ self.end_date = end_date self.grouped_trips = self.__group_trips( self.__extract_stays(data_csv_path), self.MERGE_RANGE_DAYS ) self.trip_and_home_days = self.__create_rows(self.grouped_trips) def __create_rows(self, grouped_trips): """ Create rows with trip durations, and durations of home stays betwen trips. """ rows = [] for index, stay in enumerate(grouped_trips): trip_duration = self.__duration_days_inclusive( stay['start'], stay['end'] ) home_start = stay['end'] + timedelta(days=1) if index < (len(grouped_trips) - 1): home_end = ( grouped_trips[index+1]['start'] - timedelta(days=1)) else: home_end = self.end_date home_duration = self.__duration_days_inclusive( home_start, home_end ) if home_end < home_start: home_start = None home_end = None rows.append({ 'trip': { 'start': stay['start'], 'end': stay['end'], 'duration': trip_duration }, 'home': { 'start': home_start, 'end': home_end, 'duration': home_duration } }) return rows def __duration_days_inclusive(self, start_date, end_date): """ Returns the number of inclusive days between two dates. """ return((end_date - start_date).days + 1) def __extract_stays(self, data_csv_path): """ Extract overnight stay data from a CSV file. The file should have a string City column, an integer Nights column, and a Checkout Date column (in YYYY-MM-DD format). 
""" with open(data_csv_path, newline='', encoding='utf-8', errors='replace') as f: csv_data = csv.DictReader(f) overnight_stays = [] for row in csv_data: end_date = datetime.datetime.strptime( row['Checkout Date'], "%Y-%m-%d").date() start_date = end_date - timedelta( days=int(row['Nights'])) overnight_stays.append({ 'city': row['City'], 'start': start_date, 'end': end_date }) return(sorted(overnight_stays, key = lambda i: i['start'])) def __format_stay(self, row_stay): """ Returns trip or home stay details as a formatted string. """ return(( f"{row_stay["start"].strftime(self.PRINT_DATE_FORMAT)} - " f"{row_stay["end"].strftime(self.PRINT_DATE_FORMAT)} " f"({row_stay["duration"]} days)" )) def __group_trips(self, overnight_stays, merge_range_days): """Group contiguous/back to back trips""" grouped_stays = [] for stay in overnight_stays: if (len(grouped_stays) == 0 or (stay['start'] - grouped_stays[-1]['end']).days > merge_range_days): # Create new trip: grouped_stays.append({ 'cities': [stay['city']], 'start': stay['start'], 'end': stay['end'] }) else: # Merge into last trip: if grouped_stays[-1]['cities'][-1] != stay['city']: grouped_stays[-1]['cities'].append(stay['city']) grouped_stays[-1]['end'] = stay['end'] return(grouped_stays) def generate_svg(self, output_path): svg = SVGChart(self.trip_and_home_days) svg.export(output_path) def print_grouped_trips(self): """ Prints all trip groups. """ for trip in self.grouped_trips: print(( f"{trip["start"].strftime(self.PRINT_DATE_FORMAT)} - " f"{trip["end"].strftime(self.PRINT_DATE_FORMAT)}" )) for city in trip['cities']: print(f" {city}") print("") def print_last_equal_or_greater_stay(self): """ Prints the most recent home stay that was at least as long as the current home stay. 
""" rows = self.trip_and_home_days current_home_days = rows[-1]['home']['duration'] filtered_rows = list(filter( lambda x: x['home']['duration'] >= current_home_days, rows[0:-1] )) most_recent_equal_or_greater = max( filtered_rows, key=lambda x:x['home']['start'])['home'] print( f"Most recent home stay equal to or greater than current " f"{current_home_days} days home:") print(self.__format_stay(most_recent_equal_or_greater)) print("") def print_stays(self): """ Prints all trips and home stays. """ for row in self.trip_and_home_days: print(f"Trip {self.__format_stay(row["trip"])}") if row['home']['start'] and row['home']['end']: print(f"Home {self.__format_stay(row["home"])}") else: print(f"Home ({row["home"]["duration"]} days)") print("") def print_superlative_rows(self): """ Prints the longest trip period and the longest home stay. """ max_trip = max( self.trip_and_home_days, key=lambda x:x['trip']['duration'] )['trip'] max_home = max( self.trip_and_home_days, key=lambda x:x['home']['duration'] )['home'] print(f"Max trip: {self.__format_stay(max_trip)}") print(f"Max home: {self.__format_stay(max_home)}") print("") def print_top_home_stays(self): """ Prints a ranking of the longest duration home stays, up to the rank of the current home stay. """ current_home_start = ( self.trip_and_home_days[-1]['home']['start']) home_sorted = sorted( sorted( self.trip_and_home_days, key=lambda x:x['home']['start'], reverse=True ), key=lambda x:x['home']['duration'], reverse=True ) print("Top home stays:") for index, row in enumerate(home_sorted): print(f"#{index+1}:\t{self.__format_stay(row["home"])}") if row['home']['start'] == current_home_start: break print("")
""" Represents a collection of trip stays and home stays. """ import csv, datetime from datetime import date, timedelta from svg_chart import SVGChart class StaysCollection: MERGE_RANGE_DAYS = 1 # Max days between trips to merge them PRINT_DATE_FORMAT = '%a %d %b %Y' def __init__(self, data_csv_path, end_date=date.today()): """ Initialize StaysCollection attributes. """ self.end_date = end_date self.grouped_trips = self.__group_trips( self.__extract_stays(data_csv_path), self.MERGE_RANGE_DAYS ) self.trip_and_home_days = self.__create_rows(self.grouped_trips) def __create_rows(self, grouped_trips): """ Create rows with trip durations, and durations of home stays betwen trips. """ rows = [] for index, stay in enumerate(grouped_trips): trip_duration = self.__duration_days_inclusive( stay['start'], stay['end'] ) home_start = stay['end'] + timedelta(days=1) if index < (len(grouped_trips) - 1): home_end = ( grouped_trips[index+1]['start'] - timedelta(days=1)) else: home_end = self.end_date home_duration = self.__duration_days_inclusive( home_start, home_end ) if home_end < home_start: home_start = None home_end = None rows.append({ 'trip': { 'start': stay['start'], 'end': stay['end'], 'duration': trip_duration }, 'home': { 'start': home_start, 'end': home_end, 'duration': home_duration } }) return rows def __duration_days_inclusive(self, start_date, end_date): """ Returns the number of inclusive days between two dates. """ return((end_date - start_date).days + 1) def __extract_stays(self, data_csv_path): """ Extract overnight stay data from a CSV file. The file should have a string City column, an integer Nights column, and a Checkout Date column (in YYYY-MM-DD format). 
""" with open(data_csv_path, newline='', encoding='utf-8', errors='replace') as f: csv_data = csv.DictReader(f) overnight_stays = [] for row in csv_data: end_date = datetime.datetime.strptime( row['Checkout Date'], "%Y-%m-%d").date() start_date = end_date - timedelta( days=int(row['Nights'])) overnight_stays.append({ 'city': row['City'], 'start': start_date, 'end': end_date }) return(sorted(overnight_stays, key = lambda i: i['start'])) def __format_stay(self, row_stay): """ Returns trip or home stay details as a formatted string. """ return(( f"{row_stay['start'].strftime(self.PRINT_DATE_FORMAT)} - " f"{row_stay['end'].strftime(self.PRINT_DATE_FORMAT)} " f"({row_stay['duration']} days)" )) def __group_trips(self, overnight_stays, merge_range_days): """Group contiguous/back to back trips""" grouped_stays = [] for stay in overnight_stays: if (len(grouped_stays) == 0 or (stay['start'] - grouped_stays[-1]['end']).days > merge_range_days): # Create new trip: grouped_stays.append({ 'cities': [stay['city']], 'start': stay['start'], 'end': stay['end'] }) else: # Merge into last trip: if grouped_stays[-1]['cities'][-1] != stay['city']: grouped_stays[-1]['cities'].append(stay['city']) grouped_stays[-1]['end'] = stay['end'] return(grouped_stays) def generate_svg(self, output_path): svg = SVGChart(self.trip_and_home_days) svg.export(output_path) def print_grouped_trips(self): """ Prints all trip groups. """ for trip in self.grouped_trips: print(( f"{trip['start'].strftime(self.PRINT_DATE_FORMAT)} - " f"{trip['end'].strftime(self.PRINT_DATE_FORMAT)}" )) for city in trip['cities']: print(f" {city}") print("") def print_last_equal_or_greater_stay(self): """ Prints the most recent home stay that was at least as long as the current home stay. 
""" rows = self.trip_and_home_days current_home_days = rows[-1]['home']['duration'] filtered_rows = list(filter( lambda x: x['home']['duration'] >= current_home_days, rows[0:-1] )) most_recent_equal_or_greater = max( filtered_rows, key=lambda x:x['home']['start'])['home'] print( f"Most recent home stay equal to or greater than current " f"{current_home_days} days home:") print(self.__format_stay(most_recent_equal_or_greater)) print("") def print_stays(self): """ Prints all trips and home stays. """ for row in self.trip_and_home_days: print(f"Trip {self.__format_stay(row['trip'])}") if row['home']['start'] and row['home']['end']: print(f"Home {self.__format_stay(row['home'])}") else: print(f"Home ({row['home']['duration']} days)") print("") def print_superlative_rows(self): """ Prints the longest trip period and the longest home stay. """ max_trip = max( self.trip_and_home_days, key=lambda x:x['trip']['duration'] )['trip'] max_home = max( self.trip_and_home_days, key=lambda x:x['home']['duration'] )['home'] print(f"Max trip: {self.__format_stay(max_trip)}") print(f"Max home: {self.__format_stay(max_home)}") print("") def print_top_home_stays(self): """ Prints a ranking of the longest duration home stays, up to the rank of the current home stay. """ current_home_start = ( self.trip_and_home_days[-1]['home']['start']) home_sorted = sorted( sorted( self.trip_and_home_days, key=lambda x:x['home']['start'], reverse=True ), key=lambda x:x['home']['duration'], reverse=True ) print("Top home stays:") for index, row in enumerate(home_sorted): print(f"#{index+1}:\t{self.__format_stay(row['home'])}") if row['home']['start'] == current_home_start: break print("")
from Modules import functions, data
from time import time

# Epoch seconds of the last time the periodic checks ran.
last_check_time = 0


# check_punishments unfinished
async def check_punishments(guild):
    """Expire elapsed punishments: clear the DB column, remove the matching
    role from the member, and DM them a notice."""
    # iterate through each punishment, ignore if no expiration
    for user in functions.get_data("SELECT * FROM punishments", ()):
        user_id = user[0]
        for punishment in range(1, 4):
            if not user[punishment]:
                continue
            # if expired, replace punishment expiration
            if int(time()) >= int(user[punishment]):
                punish_type = data.punishment_names[punishment]
                # Column name comes from the internal punishment-name table,
                # never from user input, so interpolating it is safe. Built
                # outside the f-string: reusing the f-string's own quote
                # character is a SyntaxError before Python 3.12.
                column = punish_type + "End"
                functions.set_data(
                    f"UPDATE punishments SET {column} = NULL WHERE userId = (?)",
                    (user_id, )
                )
                # find user, get associated role, remove and dm user
                target = guild.get_member(int(user_id))
                role = guild.get_role(data.punishment_roles[punish_type])
                if target is not None:  # member may have left the guild
                    # remove_roles is a coroutine; it does nothing unless awaited
                    await target.remove_roles(role)
                    await functions.send_embed(
                        target,
                        "ScriptersCF",
                        f"Your **{punish_type}** has expired at ScriptersCF."
                    )
        # if user has no active punishments, remove row from database
        # NOTE(review): `user` is the row as read before the UPDATE above, so a
        # row whose final punishment just expired is only deleted on the next pass.
        if not any(user[1:]):
            functions.set_data(
                "DELETE FROM punishments WHERE userId = (?)",
                (user_id, )
            )


async def award_points(message):
    """Grant activity points for a message; longer messages in configured
    channels earn that channel's point value."""
    point_amount = 1
    # check message length and get points for channel
    if len(message.content) >= 10:
        if message.channel.id in data.channel_points:
            point_amount = data.channel_points[message.channel.id]
    # update db
    await functions.increase_count(message.author, "point", point_amount)


async def handle(message):
    """Per-message entry point: award points and throttle periodic checks."""
    global last_check_time
    await award_points(message)
    # if 15 seconds has passed since last message, call funcs + reset
    current_time = int(time())
    if current_time >= last_check_time + 15:
        last_check_time = current_time
        #await check_punishments(message.guild)
from Modules import functions, data
from time import time

# Epoch seconds of the last time the periodic checks ran.
last_check_time = 0


# check_punishments unfinished
async def check_punishments(guild):
    """Expire elapsed punishments: clear the DB column, remove the matching
    role from the member, and DM them a notice."""
    # iterate through each punishment, ignore if no expiration
    for user in functions.get_data("SELECT * FROM punishments", ()):
        user_id = user[0]
        for punishment in range(1, 4):
            if not user[punishment]:
                continue
            # if expired, replace punishment expiration
            if int(time()) >= int(user[punishment]):
                punish_type = data.punishment_names[punishment]
                # Column name comes from the internal punishment-name table,
                # never from user input, so interpolating it is safe.
                functions.set_data(
                    f"UPDATE punishments SET {punish_type + 'End'} = NULL WHERE userId = (?)",
                    (user_id, )
                )
                # find user, get associated role, remove and dm user
                target = guild.get_member(int(user_id))
                role = guild.get_role(data.punishment_roles[punish_type])
                if target is not None:  # member may have left the guild
                    # remove_roles is a coroutine; it does nothing unless awaited
                    await target.remove_roles(role)
                    await functions.send_embed(
                        target,
                        "ScriptersCF",
                        f"Your **{punish_type}** has expired at ScriptersCF."
                    )
        # if user has no active punishments, remove row from database
        # NOTE(review): `user` is the row as read before the UPDATE above, so a
        # row whose final punishment just expired is only deleted on the next pass.
        if not any(user[1:]):
            functions.set_data(
                "DELETE FROM punishments WHERE userId = (?)",
                (user_id, )
            )


async def award_points(message):
    """Grant activity points for a message; longer messages in configured
    channels earn that channel's point value."""
    point_amount = 1
    # check message length and get points for channel
    if len(message.content) >= 10:
        if message.channel.id in data.channel_points:
            point_amount = data.channel_points[message.channel.id]
    # update db
    await functions.increase_count(message.author, "point", point_amount)


async def handle(message):
    """Per-message entry point: award points and throttle periodic checks."""
    global last_check_time
    await award_points(message)
    # if 15 seconds has passed since last message, call funcs + reset
    current_time = int(time())
    if current_time >= last_check_time + 15:
        last_check_time = current_time
        #await check_punishments(message.guild)
import torch.nn as nn

from wilds.common.metrics.loss import ElementwiseLoss, Loss, MultiTaskLoss
from wilds.common.metrics.all_metrics import MSE


def initialize_loss(config, d_out):
    """Build the wilds loss wrapper selected by ``config['loss_function']``.

    Args:
        config: mapping with a 'loss_function' key naming the criterion
            (and, for 'fasterrcnn_criterion', a 'device' key).
        d_out: output dimension; unused here but kept for interface parity
            with the other initializers.

    Returns:
        A wilds metric object wrapping the requested criterion.

    Raises:
        ValueError: if the configured loss function is not recognized.
    """
    # Look the name up once instead of once per branch.
    loss_name = config.get('loss_function')
    if loss_name == 'cross_entropy':
        return ElementwiseLoss(loss_fn=nn.CrossEntropyLoss(reduction='none'))
    elif loss_name == 'lm_cross_entropy':
        return MultiTaskLoss(loss_fn=nn.CrossEntropyLoss(reduction='none'))
    elif loss_name == 'mse':
        return MSE(name='loss')
    elif loss_name == 'multitask_bce':
        return MultiTaskLoss(loss_fn=nn.BCEWithLogitsLoss(reduction='none'))
    elif loss_name == 'fasterrcnn_criterion':
        # Imported lazily: the detection stack is only needed for this loss.
        from models.detection.fasterrcnn import FasterRCNNLoss
        return ElementwiseLoss(loss_fn=FasterRCNNLoss(config.get('device')))
    else:
        # Double quotes inside the single-quoted f-string: reusing the
        # f-string's own quote character is a SyntaxError before Python 3.12.
        raise ValueError(f'config.get("loss_function") {loss_name} not recognized')
import torch.nn as nn

from wilds.common.metrics.loss import ElementwiseLoss, Loss, MultiTaskLoss
from wilds.common.metrics.all_metrics import MSE


def initialize_loss(config, d_out):
    """Return the wilds loss wrapper named by ``config['loss_function']``.

    Recognized names: 'cross_entropy', 'lm_cross_entropy', 'mse',
    'multitask_bce', 'fasterrcnn_criterion'. Anything else raises
    ``ValueError``. ``d_out`` is accepted for interface parity with the
    other initializers and is not used.
    """
    chosen = config.get('loss_function')
    if chosen == 'cross_entropy':
        return ElementwiseLoss(loss_fn=nn.CrossEntropyLoss(reduction='none'))
    elif chosen == 'lm_cross_entropy':
        return MultiTaskLoss(loss_fn=nn.CrossEntropyLoss(reduction='none'))
    elif chosen == 'mse':
        return MSE(name='loss')
    elif chosen == 'multitask_bce':
        return MultiTaskLoss(loss_fn=nn.BCEWithLogitsLoss(reduction='none'))
    elif chosen == 'fasterrcnn_criterion':
        # Deferred import: only needed when the detection criterion is used.
        from models.detection.fasterrcnn import FasterRCNNLoss
        return ElementwiseLoss(loss_fn=FasterRCNNLoss(config.get('device')))
    else:
        raise ValueError(f'config.get("loss_function") {chosen} not recognized')
# coding=utf-8 # Copyright 2021 Google Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch BigBird model. """ import math import os from dataclasses import dataclass from typing import Optional, Tuple import numpy as np import torch import torch.utils.checkpoint from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel, apply_chunking_to_forward from ...utils import logging from .configuration_big_bird import BigBirdConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "google/bigbird-roberta-base" _CONFIG_FOR_DOC = "BigBirdConfig" _TOKENIZER_FOR_DOC = "BigBirdTokenizer" BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/bigbird-roberta-base", "google/bigbird-roberta-large", "google/bigbird-base-trivia-itc", # See all BigBird models at https://huggingface.co/models?filter=big_bird ] _TRIVIA_QA_MAPPING = { "big_bird_attention": "attention/self", 
"output_layer_norm": "output/LayerNorm", "attention_output": "attention/output/dense", "output": "output/dense", "self_attention_layer_norm": "attention/output/LayerNorm", "intermediate": "intermediate/dense", "word_embeddings": "bert/embeddings/word_embeddings", "position_embedding": "bert/embeddings/position_embeddings", "type_embeddings": "bert/embeddings/token_type_embeddings", "embeddings": "bert/embeddings", "layer_normalization": "output/LayerNorm", "layer_norm": "LayerNorm", "trivia_qa_head": "qa_classifier", "dense": "intermediate/dense", "dense_1": "qa_outputs", } def load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=False): """Load tf checkpoints in a pytorch model.""" def load_tf_weights_bert(init_vars, tf_path): names = [] tf_weights = {} for name, shape in init_vars: array = tf.train.load_variable(tf_path, name) name = name.replace("bert/encoder/LayerNorm", "bert/embeddings/LayerNorm") logger.info(f"Loading TF weight {name} with shape {shape}") names.append(name) tf_weights[name] = array return names, tf_weights def load_tf_weights_trivia_qa(init_vars): names = [] tf_weights = {} for i, var in enumerate(init_vars): name_items = var.name.split("/") if "transformer_scaffold" in name_items[0]: layer_name_items = name_items[0].split("_") if len(layer_name_items) < 3: layer_name_items += [0] name_items[0] = f"bert/encoder/layer_{layer_name_items[2]}" name = "/".join([_TRIVIA_QA_MAPPING[x] if x in _TRIVIA_QA_MAPPING else x for x in name_items])[ :-2 ] # remove last :0 in variable if "self/attention/output" in name: name = name.replace("self/attention/output", "output") if i >= len(init_vars) - 2: name = name.replace("intermediate", "output") logger.info(f"Loading TF weight {name} with shape {var.shape}") array = var.value().numpy() names.append(name) tf_weights[name] = array return names, tf_weights try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires 
TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.saved_model.load(tf_path).variables if is_trivia_qa else tf.train.list_variables(tf_path) assert len(init_vars) > 0, "Loaded trained variables cannot be empty." pt_names = list(model.state_dict().keys()) if is_trivia_qa: names, tf_weights = load_tf_weights_trivia_qa(init_vars) else: names, tf_weights = load_tf_weights_bert(init_vars, tf_path) for txt_name in names: array = tf_weights[txt_name] name = txt_name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info(f"Skipping {"/".join(name)}") continue pointer = model pt_name = [] for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") pt_name.append("weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") pt_name.append("bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "weight") pt_name.append("weight") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") pt_name.append("classifier") elif scope_names[0] == "transform": pointer = getattr(pointer, "transform") pt_name.append("transform") if ("bias" in name) or ("kernel" in name): pointer = getattr(pointer, "dense") pt_name.append("dense") elif ("beta" in name) or ("gamma" in name): pointer = getattr(pointer, "LayerNorm") pt_name.append("LayerNorm") else: try: pointer = getattr(pointer, scope_names[0]) 
pt_name.append(f"{scope_names[0]}") except AttributeError: logger.info(f"Skipping {m_name}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] pt_name.append(f"{num}") if m_name[-11:] == "_embeddings" or m_name == "embeddings": pointer = getattr(pointer, "weight") pt_name.append("weight") elif m_name == "kernel": array = np.transpose(array) try: if len(array.shape) > len(pointer.shape) and math.prod(array.shape) == math.prod(pointer.shape): # print(txt_name, array.shape) if ( txt_name.endswith("attention/self/key/kernel") or txt_name.endswith("attention/self/query/kernel") or txt_name.endswith("attention/self/value/kernel") ): array = array.transpose(1, 0, 2).reshape(pointer.shape) elif txt_name.endswith("attention/output/dense/kernel"): array = array.transpose(0, 2, 1).reshape(pointer.shape) else: array = array.reshape(pointer.shape) if pointer.shape != array.shape: raise ValueError( f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched of {txt_name}." 
) except AssertionError as e: e.args += (pointer.shape, array.shape) raise pt_weight_name = ".".join(pt_name) logger.info(f"Initialize PyTorch weight {pt_weight_name} from {txt_name}.") pointer.data = torch.from_numpy(array) tf_weights.pop(txt_name, None) pt_names.remove(pt_weight_name) logger.info(f"Weights not copied to PyTorch model: {", ".join(tf_weights.keys())}.") logger.info(f"Weights not initialized in PyTorch model: {", ".join(pt_names)}.") return model class BigBirdEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__ def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) if version.parse(torch.__version__) > version.parse("1.6.0"): self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False, ) # End copy self.rescale_embeddings = config.rescale_embeddings self.hidden_size = config.hidden_size def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if input_ids is not None: 
input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if self.rescale_embeddings: inputs_embeds = inputs_embeds * (self.hidden_size ** 0.5) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.dropout(embeddings) embeddings = self.LayerNorm(embeddings) return embeddings class BigBirdSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, 
bias=config.use_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. 
# Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BigBirdModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class BigBirdBlockSparseAttention(nn.Module): def __init__(self, config, seed=None): super().__init__() self.max_seqlen = config.max_position_embeddings self.seed = seed if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.num_attention_heads = config.num_attention_heads self.num_random_blocks = config.num_random_blocks self.block_size = config.block_size self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, output_attentions=None, ): # Currently this `class` can't be used in decoder. 
batch_size, seqlen, _ = hidden_states.size() to_seq_length = from_seq_length = seqlen from_block_size = to_block_size = self.block_size assert from_seq_length % from_block_size == 0, "Query sided sequence length must be multiple of block size" assert to_seq_length % to_block_size == 0, "Key/Value sided sequence length must be multiple of block size" query_layer = self.transpose_for_scores(self.query(hidden_states)) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) context_layer, attention_probs = self.bigbird_block_sparse_attention( query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, self.num_attention_heads, self.num_random_blocks, self.attention_head_size, from_block_size, to_block_size, batch_size, from_seq_length, to_seq_length, seed=self.seed, plan_from_length=None, plan_num_rand_blocks=None, output_attentions=output_attentions, ) context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs @staticmethod def torch_bmm_nd(inp_1, inp_2, ndim=None): """Fast nd matrix multiplication""" # faster replacement of torch.einsum ("bhqk,bhkd->bhqd") return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:])).view( inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 1]) ) @staticmethod def torch_bmm_nd_transpose(inp_1, inp_2, ndim=None): """Fast nd matrix multiplication with transpose""" # faster replacement of torch.einsum (bhqd,bhkd->bhqk) return torch.bmm( inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:]).transpose(1, 2) ).view(inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 2])) def bigbird_block_sparse_attention( self, query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, 
to_blocked_mask, n_heads, n_rand_blocks, attention_head_size, from_block_size, to_block_size, batch_size, from_seq_len, to_seq_len, seed, plan_from_length, plan_num_rand_blocks, output_attentions, ): # BigBird block-sparse attention as suggested in paper # ITC: # global tokens: 2 x block_size # window tokens: 3 x block_size # random tokens: num_rand_tokens x block_size # ETC: # global tokens: extra_globals_tokens + 2 x block_size # window tokens: 3 x block_size # random tokens: num_rand_tokens x block_size # Note: # 1) Currently, ETC is not supported. # 2) Window size is fixed to 3 blocks & it can be changed only by # changing `block_size`. # 3) Number of global blocks are fixed (2 blocks here) & global tokens can be # controlled only by `block_size`. # attention is calculated separately for q[0], q[1], q[2:-2], q[-2], q[-1] in order to use special trick of shifting tokens (for calculating sliding attention) # hence following code can be divided into 5 parts. if from_seq_len // from_block_size != to_seq_len // to_block_size: raise ValueError("Error the number of blocks needs to be same!") rsqrt_d = 1 / math.sqrt(attention_head_size) bsz = batch_size attn_mask_penalty = -10000.0 # generate random attention and corresponding masks np.random.seed(seed) if from_seq_len in [1024, 3072, 4096]: # old plans used in paper rand_attn = [ self._bigbird_block_rand_mask( self.max_seqlen, self.max_seqlen, from_block_size, to_block_size, n_rand_blocks, last_idx=1024 )[: (from_seq_len // from_block_size - 2)] for _ in range(n_heads) ] else: if plan_from_length is None: plan_from_length, plan_num_rand_blocks = self._get_rand_attn_plan( from_seq_len, from_block_size, n_rand_blocks ) rand_attn = self._bigbird_block_rand_mask_with_head( from_seq_length=from_seq_len, to_seq_length=to_seq_len, from_block_size=from_block_size, to_block_size=to_block_size, num_heads=n_heads, plan_from_length=plan_from_length, plan_num_rand_blocks=plan_num_rand_blocks, ) rand_attn = np.stack(rand_attn, 
axis=0) rand_attn = torch.tensor(rand_attn, device=query_layer.device, dtype=torch.long) rand_attn.unsqueeze_(0) rand_attn = torch.cat([rand_attn for _ in range(batch_size)], dim=0) rand_mask = self._create_rand_mask_from_inputs( from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size ) blocked_query_matrix = query_layer.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, -1) blocked_key_matrix = key_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1) blocked_value_matrix = value_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1) # preparing block for randn attn gathered_key = self.torch_gather_b2(blocked_key_matrix, rand_attn) gathered_key = gathered_key.view( bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1 ) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1] gathered_value = self.torch_gather_b2(blocked_value_matrix, rand_attn) gathered_value = gathered_value.view( bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1 ) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1] # 1st PART # 1st block (global block) attention scores # q[0] x (k[0], k[1], k[2], k[3], k[4] .... 
) # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len] first_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 0], key_layer, ndim=4) first_product = first_product * rsqrt_d first_product += (1.0 - to_mask) * attn_mask_penalty first_attn_weights = nn.functional.softmax( first_product, dim=-1 ) # [bsz, n_heads, from_block_size, to_seq_len] # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1] first_context_layer = self.torch_bmm_nd(first_attn_weights, value_layer, ndim=4) first_context_layer.unsqueeze_(2) # 2nd PART # 2nd block attention scores # q[1] x (sliding_keys, random_keys, global_keys) # sliding key blocks -> 2nd, 3rd blocks # global key blocks -> 1st block second_key_mat = torch.cat( [ blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, 1], blocked_key_matrix[:, :, 2], blocked_key_matrix[:, :, -1], gathered_key[:, :, 0], ], dim=2, ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] second_value_mat = torch.cat( [ blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, 1], blocked_value_matrix[:, :, 2], blocked_value_matrix[:, :, -1], gathered_value[:, :, 0], ], dim=2, ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] second_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 1], second_key_mat, ndim=4) second_seq_pad = torch.cat( [ to_mask[:, :, :, : 3 * to_block_size], to_mask[:, :, :, -to_block_size:], to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]), ], dim=3, ) second_rand_pad = torch.cat( [ rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]), rand_mask[:, :, 0], ], dim=3, ) second_product = second_product * rsqrt_d second_product += (1.0 - torch.minimum(second_seq_pad, second_rand_pad)) * 
attn_mask_penalty second_attn_weights = nn.functional.softmax( second_product, dim=-1 ) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1] second_context_layer = self.torch_bmm_nd(second_attn_weights, second_value_mat, ndim=4) second_context_layer.unsqueeze_(2) # 3rd PART # Middle blocks attention scores # q[-2:2] x (sliding_keys, random_keys, global_keys) # sliding attn is calculated using special trick of shifting tokens as discussed in paper # random keys are generated by taking random indices as per `rand_attn` # global keys -> 1st & last block exp_blocked_key_matrix = torch.cat( [blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1]], dim=3 ) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] exp_blocked_value_matrix = torch.cat( [blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]], dim=3, ) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] middle_query_matrix = blocked_query_matrix[:, :, 2:-2] # sliding attention scores for q[-2:2] # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [b, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] inner_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, exp_blocked_key_matrix, ndim=5) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, 3*to_block_size] inner_band_product = inner_band_product * rsqrt_d # randn attention scores for q[-2:2] # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1] rand_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, gathered_key[:, :, 1:-1], ndim=5) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, 
from_block_size, n_rand_blocks*to_block_size] rand_band_product = rand_band_product * rsqrt_d # Including 1st block (since it's global) first_band_product = torch.einsum( "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, 0] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] first_band_product = first_band_product * rsqrt_d # Including last block (since it's global) last_band_product = torch.einsum( "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, -1] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] last_band_product = last_band_product * rsqrt_d # masking padded tokens inner_band_product += (1.0 - band_mask) * attn_mask_penalty first_band_product += (1.0 - to_mask[:, :, :, :to_block_size].unsqueeze(3)) * attn_mask_penalty last_band_product += (1.0 - to_mask[:, :, :, -to_block_size:].unsqueeze(3)) * attn_mask_penalty rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * attn_mask_penalty # completing attention scores matrix for all q[-2:2] band_product = torch.cat( [first_band_product, inner_band_product, rand_band_product, last_band_product], dim=-1 ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size] # safely doing softmax since attention matrix is completed attn_weights = nn.functional.softmax( band_product, dim=-1 ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size] # contribution of sliding keys # [bsz, n_heads, m//from_block_size-4, from_block_size, 3*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] context_layer = self.torch_bmm_nd( attn_weights[:, :, :, :, to_block_size : 4 * to_block_size], 
exp_blocked_value_matrix, ndim=5 ) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # adding contribution of random keys # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1] context_layer += self.torch_bmm_nd( attn_weights[:, :, :, :, 4 * to_block_size : -to_block_size], gathered_value[:, :, 1:-1], ndim=5 ) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # adding contribution of global keys context_layer += torch.einsum( "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] context_layer += torch.einsum( "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # 4th PART # last 2nd token attention scores # q[-2] x (sliding_keys, random_keys, global_keys) # sliding key blocks -> last 3 blocks # global key block -> 1st block # random key block -> based on indices stored in `randn_attn` second_last_key_mat = torch.cat( [ blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, -3], blocked_key_matrix[:, :, -2], blocked_key_matrix[:, :, -1], gathered_key[:, :, -1], ], dim=2, ) # [bsz, n_heads, (4+n_random_blocks)*to_block_size, -1] second_last_value_mat = torch.cat( [ blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, -3], blocked_value_matrix[:, :, -2], blocked_value_matrix[:, :, -1], gathered_value[:, :, -1], ], dim=2, ) # [bsz, n_heads, (4+r)*to_block_size, -1] # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, 
(4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] second_last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -2], second_last_key_mat, ndim=4) second_last_seq_pad = torch.cat( [ to_mask[:, :, :, :to_block_size], to_mask[:, :, :, -3 * to_block_size :], to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]), ], dim=3, ) second_last_rand_pad = torch.cat( [ rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]), rand_mask[:, :, -1], ], dim=3, ) second_last_product = second_last_product * rsqrt_d second_last_product += (1.0 - torch.minimum(second_last_seq_pad, second_last_rand_pad)) * attn_mask_penalty second_last_attn_weights = nn.functional.softmax( second_last_product, dim=-1 ) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1] second_last_context_layer = self.torch_bmm_nd(second_last_attn_weights, second_last_value_mat, ndim=4) second_last_context_layer.unsqueeze_(2) # 5th PART # last block (global) attention scores # q[-1] x (k[0], k[1], k[2], k[3], .... 
) # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len] last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -1], key_layer, ndim=4) last_product = last_product * rsqrt_d last_product += (1.0 - to_mask) * attn_mask_penalty last_attn_weights = nn.functional.softmax(last_product, dim=-1) # [bsz, n_heads, from_block_size, n] # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1] last_context_layer = self.torch_bmm_nd(last_attn_weights, value_layer, ndim=4) last_context_layer.unsqueeze_(2) # combining representations of all tokens context_layer = torch.cat( [first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer], dim=2, ) context_layer = context_layer.view((bsz, n_heads, from_seq_len, -1)) * from_mask context_layer = torch.transpose(context_layer, 1, 2) # this is just for visualizing; forward pass doesn't depend on following code if output_attentions: # TODO(PVP): need to verify if below code is correct attention_probs = torch.zeros( bsz, n_heads, from_seq_len, to_seq_len, dtype=torch.float, device=context_layer.device ) # 1st query block # corresponding to `first_context_layer` attention_probs[:, :, :from_block_size, :] = first_attn_weights # all keys global # 2nd query block # corresponding to `second_context_layer` attention_probs[:, :, from_block_size : 2 * from_block_size, : 3 * to_block_size] = second_attn_weights[ :, :, :, : 3 * to_block_size ] # 1st three key blocks (global + sliding) attention_probs[:, :, from_block_size : 2 * from_block_size, -to_block_size:] = second_attn_weights[ :, :, :, 3 * to_block_size : 4 * to_block_size ] # last key block (global) # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, second_attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. 
following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[:, 4 * to_block_size :] attn_probs_view[p1, p2, 1, :, i2[0]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # Middle query blocks # corresponding to `context_layer` # sliding keys for q_idx in range(from_seq_len // from_block_size - 4): attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, )[:, :, 2:-2, :, 1:-1, :] right_slice = attn_weights[:, :, q_idx, :, to_block_size : 4 * to_block_size] attn_probs_view[:, :, q_idx, :, q_idx : q_idx + 3, :] = right_slice.view( bsz, n_heads, from_block_size, 3, to_block_size ) # inner_band_product # global keys (corresponding to 1st key block) attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, :to_block_size] = attn_weights[ :, :, :, :, :to_block_size ].view( bsz, n_heads, -1, to_block_size ) # first_band_product # global keys (corresponding to last key block) attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, -to_block_size:] = attn_weights[ :, :, :, :, -to_block_size: ].view( bsz, n_heads, -1, to_block_size ) # last_band_product # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. 
following operation is done for each heads for q_idx in range(1, len(i2) - 1): attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[q_idx - 1, :, 4 * to_block_size : -to_block_size] attn_probs_view[p1, p2, q_idx + 1, :, i2[q_idx]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # Second-last query block # corresponding to `second_last_context_layer` attention_probs[:, :, -2 * from_block_size : -from_block_size, :to_block_size] = second_last_attn_weights[ :, :, :, :to_block_size ] # 1st key block (global) attention_probs[ :, :, -2 * from_block_size : -from_block_size, -3 * to_block_size : ] = second_last_attn_weights[ :, :, :, to_block_size : 4 * to_block_size ] # last three blocks (global + sliding) # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, second_last_attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[:, 4 * to_block_size :] attn_probs_view[p1, p2, -2, :, i2[-1]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # last query block # corresponding to `last_context_layer` attention_probs[:, :, -from_block_size:, :] = last_attn_weights # all keys global else: attention_probs = None return context_layer, attention_probs @staticmethod def torch_gather_b2(params, indices): # this operation is equivalent to tf.gather when batch_dims=2 if params.shape[:2] != indices.shape[:2]: raise ValueError( f"Make sure that the first two dimensions of params and indices are identical, \ but they are params: {params.shape[:2]} vs. 
indices: {params.shape[:2]}" ) num_indices_to_gather = indices.shape[-2] * indices.shape[-1] num_indices_to_pick_from = params.shape[2] indices_shift = ( torch.arange(indices.shape[0] * indices.shape[1] * num_indices_to_gather, device=indices.device) // num_indices_to_gather * num_indices_to_pick_from ) flattened_indices = indices.view(-1) + indices_shift flattened_params = params.reshape(-1, params.shape[-2], params.shape[-1]) out_flattened = flattened_params.index_select(0, flattened_indices) out = out_flattened.reshape(params.shape[:2] + (num_indices_to_gather,) + params.shape[3:]) return out @staticmethod def _create_rand_mask_from_inputs( from_blocked_mask, to_blocked_mask, rand_attn, num_attention_heads, num_rand_blocks, batch_size, from_seq_length, from_block_size, ): """ Create 3D attention mask from a 2D tensor mask. Args: from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. rand_attn: [batch_size, num_attention_heads, from_seq_length//from_block_size-2, num_rand_blocks] num_attention_heads: int. Number of attention heads. num_rand_blocks: int. Number of random chunks per row. batch_size: int. Batch size for computation. from_seq_length: int. length of from sequence. from_block_size: int. size of block in from sequence. Returns: float Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2, from_block_size, num_rand_blocks*to_block_size]. 
""" num_windows = from_seq_length // from_block_size - 2 rand_mask = torch.stack([p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)]) rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size) rand_mask = torch.einsum("blq,bhlk->bhlqk", from_blocked_mask[:, 1:-1], rand_mask) return rand_mask @staticmethod def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks): """ Gives the plan of where to put random attention. Args: from_seq_length: int. length of from sequence. from_block_size: int. size of block in from sequence. num_rand_blocks: int. Number of random chunks per row. Returns: plan_from_length: ending location of from block plan_num_rand_blocks: number of random ending location for each block """ plan_from_length = [] plan_num_rand_blocks = [] if (2 * num_rand_blocks + 5) < (from_seq_length // from_block_size): plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(0) elif (num_rand_blocks + 5) < (from_seq_length // from_block_size): plan_from_length.append(int((num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks // 2) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks - (num_rand_blocks // 2)) else: plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks) return plan_from_length, plan_num_rand_blocks @staticmethod def _bigbird_block_rand_mask( from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1 ): """ Create adjacency list of random attention. Args: from_seq_length: int. length of from sequence. to_seq_length: int. length of to sequence. from_block_size: int. size of block in from sequence. to_block_size: int. size of block in to sequence. num_rand_blocks: int. Number of random chunks per row. 
last_idx: if -1 then num_rand_blocks blocks chosen anywhere in to sequence, if positive then num_rand_blocks blocks chosen only up to last_idx. Returns: adjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks """ # using this method when from_seq_length in [1024, 3072, 4096] assert ( from_seq_length // from_block_size == to_seq_length // to_block_size ), "Error the number of blocks needs to be same!" rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32) middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32) last = to_seq_length // to_block_size - 1 if last_idx > (2 * to_block_size): last = (last_idx // to_block_size) - 1 r = num_rand_blocks # shorthand for i in range(1, from_seq_length // from_block_size - 1): start = i - 2 end = i if i == 1: rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r] elif i == 2: rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r] elif i == from_seq_length // from_block_size - 3: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r] # Missing -3: should have been sliced till last-3 elif i == from_seq_length // from_block_size - 2: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r] # Missing -4: should have been sliced till last-4 else: if start > last: start = last rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r] elif (end + 1) == last: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r] else: rand_attn[i - 1, :] = np.random.permutation( np.concatenate((middle_seq[:start], middle_seq[end + 1 : last])) )[:r] return rand_attn def _bigbird_block_rand_mask_with_head( self, from_seq_length, to_seq_length, from_block_size, to_block_size, num_heads, plan_from_length, plan_num_rand_blocks, window_block_left=1, window_block_right=1, global_block_top=1, global_block_bottom=1, global_block_left=1, global_block_right=1, ): """ Create adjacency list of random attention. 
Args: from_seq_length: int. length of from sequence. to_seq_length: int. length of to sequence. from_block_size: int. size of block in from sequence. to_block_size: int. size of block in to sequence. num_heads: int. total number of heads. plan_from_length: list. plan from length where num_random_blocks are chosen from. plan_num_rand_blocks: list. number of rand blocks within the plan. window_block_left: int. number of blocks of window to left of a block. window_block_right: int. number of blocks of window to right of a block. global_block_top: int. number of blocks at the top. global_block_bottom: int. number of blocks at the bottom. global_block_left: int. Number of blocks globally used to the left. global_block_right: int. Number of blocks globally used to the right. Returns: adjacency list of size num_head where each element is of size from_seq_length//from_block_size-2 by num_rand_blocks """ # using this method when from_seq_length not in [1024, 3072, 4096] assert ( from_seq_length // from_block_size == to_seq_length // to_block_size ), "Error the number of blocks needs to be same!" assert from_seq_length in plan_from_length, "Error from sequence length not in plan!" 
# Total number of blocks in the mmask num_blocks = from_seq_length // from_block_size # Number of blocks per plan plan_block_length = np.array(plan_from_length) // from_block_size # till when to follow plan max_plan_idx = plan_from_length.index(from_seq_length) # Random Attention adjacency list rand_attn = [ np.zeros((num_blocks, np.sum(plan_num_rand_blocks[: max_plan_idx + 1])), dtype=np.int32) for i in range(num_heads) ] # We will go iteratively over the plan blocks and pick random number of # Attention blocks from the legally allowed blocks for plan_idx in range(max_plan_idx + 1): rnd_r_cnt = 0 if plan_idx > 0: # set the row for all from_blocks starting from 0 to # plan_block_length[plan_idx-1] # column indx start fromm plan_block_length[plan_idx-1] and ends at # plan_block_length[plan_idx] if plan_num_rand_blocks[plan_idx] > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx])) curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1])) for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]): for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=plan_block_length[plan_idx - 1], to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) for pl_id in range(plan_idx): if plan_num_rand_blocks[pl_id] == 0: continue for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]): rnd_r_cnt = 0 to_start_block_id = 0 if pl_id > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id])) to_start_block_id = plan_block_length[pl_id - 1] curr_r_cnt = int(np.sum(plan_num_rand_blocks[: pl_id + 1])) for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention( block_id=blk_rw_idx, 
to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[pl_id], num_rand_blocks=plan_num_rand_blocks[pl_id], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) if plan_num_rand_blocks[plan_idx] == 0: continue curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1])) from_start_block_id = global_block_top to_start_block_id = 0 if plan_idx > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx])) from_start_block_id = plan_block_length[plan_idx - 1] to_start_block_id = plan_block_length[plan_idx - 1] for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]): for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) for nh in range(num_heads): rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :] return rand_attn @staticmethod def _get_single_block_row_attention( block_id, to_start_block_id, to_end_block_id, num_rand_blocks, window_block_left=1, window_block_right=1, global_block_left=1, global_block_right=1, ): """ For a single row block get random row attention. Args: block_id: int. block id of row. to_start_block_id: int. random attention column start id. to_end_block_id: int. random attention column end id. num_rand_blocks: int. number of random blocks to be selected. window_block_left: int. number of blocks of window to left of a block. window_block_right: int. number of blocks of window to right of a block. global_block_left: int. Number of blocks globally used to the left. global_block_right: int. 
Number of blocks globally used to the right. Returns: row containing the random attention vector of size num_rand_blocks. """ # list of to_blocks from which to choose random attention to_block_list = np.arange(to_start_block_id, to_end_block_id, dtype=np.int32) # permute the blocks perm_block = np.random.permutation(to_block_list) # illegal blocks for the current block id, using window illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1)) # Add blocks at the start and at the end illegal_blocks.extend(list(range(global_block_left))) illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id))) # The second from_block cannot choose random attention on second last to_block if block_id == 1: illegal_blocks.append(to_end_block_id - 2) # The second last from_block cannot choose random attention on second to_block if block_id == to_end_block_id - 2: illegal_blocks.append(1) selected_random_blokcs = [] for i in range(to_end_block_id - to_start_block_id): if perm_block[i] not in illegal_blocks: selected_random_blokcs.append(perm_block[i]) if len(selected_random_blokcs) == num_rand_blocks: break return np.array(selected_random_blokcs, dtype=np.int32) # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BigBird class BigBirdSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BigBirdAttention(nn.Module): def __init__(self, config, seed=None): super().__init__() self.attention_type = config.attention_type self.config = config self.seed = seed if 
self.config.attention_type == "original_full": self.self = BigBirdSelfAttention(config) elif self.config.attention_type == "block_sparse": self.self = BigBirdBlockSparseAttention(config, seed) else: raise ValueError( f"attention_type can either be original_full or block_sparse, but is {self.config.attention_type}" ) self.output = BigBirdSelfOutput(config) def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value if value == "original_full": # copy all weights to new full attention class attn_weights = BigBirdSelfAttention(self.config) else: # copy all weights to new sparse attention class attn_weights = BigBirdBlockSparseAttention(self.config, self.seed) attn_weights.query = self.self.query attn_weights.value = self.self.value attn_weights.key = self.self.key self.self = attn_weights self.attention_type = value if not self.training: self.self.eval() def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, # block_sparse config band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, ): if self.attention_type == "original_full": self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) else: assert ( encoder_hidden_states is None ), "BigBird cannot be used as a decoder when config.attention_type != 'original_full'" self_outputs = self.self( hidden_states, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, output_attentions ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if 
we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BigBird class BigBirdIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BigBird class BigBirdOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BigBirdLayer(nn.Module): def __init__(self, config, seed=None): super().__init__() self.config = config self.attention_type = config.attention_type self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BigBirdAttention(config, seed=seed) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added" self.crossattention = BigBirdAttention(config) self.intermediate = BigBirdIntermediate(config) self.output = BigBirdOutput(config) def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # 
attention type is already correctly set if value == self.attention_type: return self.attention_type = value self.attention.set_attention_type(value) if self.add_cross_attention: self.crossattention.set_attention_type(value) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, band_mask=None, from_mask=None, to_mask=None, blocked_encoder_mask=None, past_key_value=None, output_attentions=False, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=self_attn_past_key_value, output_attentions=output_attentions, band_mask=band_mask, from_mask=from_mask, to_mask=to_mask, from_blocked_mask=blocked_encoder_mask, to_blocked_mask=blocked_encoder_mask, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with \ cross-attention layers by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) 
            # Position 0 of the cross-attention output is the attended hidden states.
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        # Feed-forward is applied in chunks along the sequence dimension (seq_len_dim=1)
        # to bound peak activation memory.
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        # FFN sub-layer for one chunk: up-projection then down-projection with residual + LayerNorm.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output


class BigBirdEncoder(nn.Module):
    # Stack of BigBirdLayer modules; each layer is seeded with its own index so
    # block-sparse random attention differs per layer but is reproducible.
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.attention_type = config.attention_type

        self.layer = nn.ModuleList(
            [BigBirdLayer(config, seed=layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )

    def set_attention_type(self, value: str):
        # Propagate an attention-type switch ('original_full' | 'block_sparse') to every layer.
        if value not in ["original_full", "block_sparse"]:
            raise ValueError(
                f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
            )
        # attention type is already correctly set
        if value == self.attention_type:
            return
        self.attention_type = value
        for layer in self.layer:
            layer.set_attention_type(value)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        band_mask=None,
        from_mask=None,
        to_mask=None,
        blocked_encoder_mask=None,
        return_dict=True,
    ):
        # Accumulators are tuples (or None when the corresponding output is not requested).
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        next_decoder_cache = () if use_cache else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if getattr(self.config, "gradient_checkpointing", False) and self.training:
                # Checkpointing re-runs the forward in backward, which is incompatible
                # with returning a cache.
                if use_cache:
                    logger.warning(
                        "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                        "`use_cache=False`..."
                    )
                    use_cache = False

                # Closure binds the non-tensor args (past_key_value, output_attentions)
                # that torch.utils.checkpoint cannot pass through.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    band_mask,
                    from_mask,
                    to_mask,
                    blocked_encoder_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    band_mask,
                    from_mask,
                    to_mask,
                    blocked_encoder_mask,
                    past_key_value,
                    output_attentions,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                # Each layer returns its key/value cache as the last element.
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            # Tuple output: drop the entries that were not requested.
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with
Bert->BigBird class BigBirdPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->BigBird class BigBirdLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BigBirdPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->BigBird class BigBirdOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BigBirdLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores # Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->BigBird class BigBirdOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = 
self.seq_relationship(pooled_output)
        return seq_relationship_score


# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->BigBird
class BigBirdPreTrainingHeads(nn.Module):
    """Combined pre-training heads: MLM vocabulary logits plus 2-way NSP logits."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BigBirdLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        # MLM scores come from per-token hidden states; NSP score from the pooled [CLS] representation.
        prediction_scores = self.predictions(sequence_output)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score


class BigBirdPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = BigBirdConfig
    load_tf_weights = load_tf_weights_in_big_bird
    # NOTE: the base model attribute is named `bert` (not `bigbird`) so BERT-style checkpoints map cleanly.
    base_model_prefix = "bert"
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            # Keep the padding embedding at exactly zero after the normal init above.
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


BIG_BIRD_START_DOCSTRING = r"""
    This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config (:class:`~transformers.BigBirdConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ BIG_BIRD_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`transformers.BigBirdTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ @dataclass class BigBirdForPreTrainingOutput(ModelOutput): """ Output type of :class:`~transformers.BigBirdForPreTraining`. Args: loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None prediction_logits: torch.FloatTensor = None seq_relationship_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class BigBirdForQuestionAnsweringModelOutput(ModelOutput): """ Base class for outputs of question answering models. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`): Span-end scores (before SoftMax). pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 1)`): pooler output from BigBigModel hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None pooler_output: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @add_start_docstrings( "The bare BigBird Model transformer outputting raw hidden-states without any specific head on top.", BIG_BIRD_START_DOCSTRING, ) class BigBirdModel(BigBirdPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `Attention is all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder` argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an input to the forward pass. 
""" def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.attention_type = self.config.attention_type self.config = config self.block_size = self.config.block_size self.embeddings = BigBirdEmbeddings(config) self.encoder = BigBirdEncoder(config) if add_pooling_layer: self.pooler = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() else: self.pooler = None self.activation = None if self.attention_type != "original_full" and config.add_cross_attention: logger.warning( "When using `BigBirdForCausalLM` as decoder, then `attention_type` must be `original_full`. Setting `attention_type=original_full`" ) self.set_attention_type("original_full") self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value self.encoder.set_attention_type(value) @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last 
layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # in order to use block_sparse attention, sequence_length has to be at least # bigger than all global attentions: 2 * block_size # + sliding tokens: 3 * block_size # + random tokens: 2 * num_random_blocks * block_size max_tokens_to_attend = (5 + 2 * self.config.num_random_blocks) * self.config.block_size if self.attention_type == "block_sparse" and seq_length <= max_tokens_to_attend: # change attention_type from block_sparse to original_full sequence_length = 
input_ids.size(1) if input_ids is not None else inputs_embeds.size(1) logger.warning( "Attention type 'block_sparse' is not possible if sequence_length: " f"{sequence_length} <= num global tokens: 2 * config.block_size " "+ min. num sliding tokens: 3 * config.block_size " "+ config.num_random_blocks * config.block_size " "+ additional buffer: config.num_random_blocks * config.block_size " f"= {max_tokens_to_attend} with config.block_size " f"= {self.config.block_size}, config.num_random_blocks " f"= {self.config.num_random_blocks}." "Changing attention type to 'original_full'..." ) self.set_attention_type("original_full") if self.attention_type == "block_sparse": ( padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds, ) = self._pad_to_block_size( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, pad_token_id=self.config.pad_token_id, ) else: padding_len = 0 if self.attention_type == "block_sparse": blocked_encoder_mask, band_mask, from_mask, to_mask = self.create_masks_for_block_sparse_attn( attention_mask, self.block_size ) extended_attention_mask = None elif self.attention_type == "original_full": blocked_encoder_mask = None band_mask = None from_mask = None to_mask = None # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask( attention_mask, input_shape, device ) else: raise ValueError( f"attention_type can either be original_full or block_sparse, but is {self.attention_type}" ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, band_mask=band_mask, from_mask=from_mask, to_mask=to_mask, blocked_encoder_mask=blocked_encoder_mask, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooler_output = self.activation(self.pooler(sequence_output[:, 0, :])) if (self.pooler 
is not None) else None # undo padding if padding_len > 0: # unpad `sequence_output` because the calling function is expecting a length == input_ids.size(1) sequence_output = sequence_output[:, :-padding_len] if not return_dict: return (sequence_output, pooler_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooler_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @staticmethod def create_masks_for_block_sparse_attn(attention_mask: torch.Tensor, block_size: int): batch_size, seq_length = attention_mask.size() assert ( seq_length % block_size == 0 ), f"Sequence length must be multiple of block size, but sequence length is {seq_length}, while block size is {block_size}." def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask): """ Create 3D attention mask from a 2D tensor mask. Args: from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. Returns: float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size, 3*to_block_size]. 
""" exp_blocked_to_pad = torch.cat( [to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], dim=2 ) band_mask = torch.einsum("blq,blk->blqk", from_blocked_mask[:, 2:-2], exp_blocked_to_pad) band_mask.unsqueeze_(1) return band_mask blocked_encoder_mask = attention_mask.view(batch_size, seq_length // block_size, block_size) band_mask = create_band_mask_from_inputs(blocked_encoder_mask, blocked_encoder_mask) from_mask = attention_mask.view(batch_size, 1, seq_length, 1) to_mask = attention_mask.view(batch_size, 1, 1, seq_length) return blocked_encoder_mask, band_mask, from_mask, to_mask def _pad_to_block_size( self, input_ids: torch.Tensor, attention_mask: torch.Tensor, token_type_ids: torch.Tensor, position_ids: torch.Tensor, inputs_embeds: torch.Tensor, pad_token_id: int, ): """A helper function to pad tokens and mask to work with implementation of BigBird block-sparse attention.""" # padding block_size = self.config.block_size input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape batch_size, seq_len = input_shape[:2] padding_len = (block_size - seq_len % block_size) % block_size if padding_len > 0: logger.info( f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of " f"`config.block_size`: {block_size}" ) if input_ids is not None: input_ids = nn.functional.pad(input_ids, (0, padding_len), value=pad_token_id) if position_ids is not None: # pad with position_id = pad_token_id as in modeling_bigbird.BigBirdEmbeddings position_ids = nn.functional.pad(position_ids, (0, padding_len), value=pad_token_id) if inputs_embeds is not None: input_ids_padding = inputs_embeds.new_full( (batch_size, padding_len), self.config.pad_token_id, dtype=torch.long, ) inputs_embeds_padding = self.embeddings(input_ids_padding) inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2) attention_mask = nn.functional.pad( attention_mask, (0, padding_len), value=False ) # no attention on 
the padding tokens token_type_ids = nn.functional.pad(token_type_ids, (0, padding_len), value=0) # pad with token_type_id = 0 return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds class BigBirdForPreTraining(BigBirdPreTrainedModel): def __init__(self, config): super().__init__(config) self.bert = BigBirdModel(config, add_pooling_layer=True) self.cls = BigBirdPreTrainingHeads(config) self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=BigBirdForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, next_sentence_label=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`): Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`): Labels for computing the next sequence prediction (classification) loss. If specified, nsp loss will be added to masked_lm loss. Input should be a sequence pair (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``: - 0 indicates sequence B is a continuation of sequence A, - 1 indicates sequence B is a random sequence. kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`): Used to hide legacy arguments that have been deprecated. 
        Returns:

        Example::

            >>> from transformers import BigBirdTokenizer, BigBirdForPreTraining
            >>> import torch

            >>> tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
            >>> model = BigBirdForPreTraining.from_pretrained('google/bigbird-roberta-base')

            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)

            >>> prediction_logits = outputs.prediction_logits
            >>> seq_relationship_logits = outputs.seq_relationship_logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # outputs[0] is the per-token sequence output, outputs[1] the pooled output.
        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        total_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            total_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        # NOTE(review): the NSP loss is only added when MLM `labels` are ALSO provided —
        # `loss_fct` is defined inside the `labels` branch, so `next_sentence_label` alone yields no loss.
        if next_sentence_label is not None and total_loss is not None:
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = total_loss + next_sentence_loss

        if not return_dict:
            # Tuple output: prepend the loss only when one was computed.
            output = (prediction_scores, seq_relationship_score) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return BigBirdForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings("""BigBird Model with a `language modeling` head on top.
""", BIG_BIRD_START_DOCSTRING) class BigBirdForMaskedLM(BigBirdPreTrainedModel): def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning( "If you want to use `BigBirdForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.bert = BigBirdModel(config) self.cls = BigBirdOnlyMLMHead(config) self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs): input_shape = input_ids.shape effective_batch_size = input_shape[0] # add a dummy token assert self.config.pad_token_id is not None, "The PAD token should be defined for generation" attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1) dummy_token = torch.full( (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device ) input_ids = torch.cat([input_ids, dummy_token], dim=1) return {"input_ids": input_ids, "attention_mask": attention_mask} @add_start_docstrings( """BigBird Model with a `language modeling` head on top for CLM fine-tuning. 
""", BIG_BIRD_START_DOCSTRING ) class BigBirdForCausalLM(BigBirdPreTrainedModel): _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning("If you want to use `BigBirdForCausalLM` as a standalone, add `is_decoder=True.`") self.bert = BigBirdModel(config) self.cls = BigBirdOnlyMLMHead(config) self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]``. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). 
Returns: Example:: >>> from transformers import BigBirdTokenizer, BigBirdForCausalLM, BigBirdConfig >>> import torch >>> tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') >>> config = BigBirdConfig.from_pretrained("google/bigbird-base") >>> config.is_decoder = True >>> model = BigBirdForCausalLM.from_pretrained('google/bigbird-roberta-base', config=config) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) lm_loss = None if labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape # if model is used as a decoder in 
encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) # cut decoder_input_ids if past is used if past is not None: input_ids = input_ids[:, -1:] return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past} def _reorder_cache(self, past, beam_idx): reordered_past = () for layer_past in past: reordered_past += ( tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past class BigBirdClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) self.config = config def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings( """ BigBird Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
    """,
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdForSequenceClassification(BigBirdPreTrainedModel):
    # BigBird encoder + BigBirdClassificationHead (which reads the first-token hidden state).
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.bert = BigBirdModel(config)
        self.classifier = BigBirdClassificationHead(config)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        # Fall back to the config default when the caller does not specify return_dict.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # Lazily infer the loss type from num_labels and the labels' dtype, and cache the
            # decision on the config (NOTE: this mutates self.config.problem_type on first use).
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    # Single regression target: squeeze both sides to matching 1-D shapes.
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                # One independent binary decision per label.
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            # Legacy tuple output: logits first, then any extra encoder outputs; prepend loss when present.
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    BigBird Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdForMultipleChoice(BigBirdPreTrainedModel):
    # BigBird encoder + dropout + a single-logit linear head; one score per candidate choice.
    def __init__(self, config):
        super().__init__(config)

        self.bert = BigBirdModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, 1)

        self.init_weights()

    @add_start_docstrings_to_model_forward(
        BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
            num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
            :obj:`input_ids` above)
        """
        # Fall back to the config default when the caller does not specify return_dict.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # num_choices is the second dimension of whichever input was provided.
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        # Flatten (batch_size, num_choices, ...) to (batch_size * num_choices, ...) so every
        # choice is encoded as an independent sequence by the shared encoder.
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # outputs[1] is the pooled (per-sequence) representation from the encoder.
        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # Un-flatten the per-sequence scores back to (batch_size, num_choices) for the softmax/CE.
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            # Legacy tuple output: logits first, then any extra encoder outputs; prepend loss when present.
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    BigBird Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdForTokenClassification(BigBirdPreTrainedModel):
    # BigBird encoder + dropout + per-token linear classifier (num_labels logits per token).
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BigBirdModel(config)
        # Prefer the dedicated classifier dropout when configured; otherwise reuse the hidden dropout rate.
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
            1]``.
        """
        # Fall back to the config default when the caller does not specify return_dict.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss
            if attention_mask is not None:
                # Replace the labels at padded positions (attention_mask == 0) with the loss's
                # ignore_index so those tokens do not contribute to the loss.
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)
                active_labels = torch.where(
                    active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            # Legacy tuple output: logits first, then any extra encoder outputs; prepend loss when present.
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


class BigBirdForQuestionAnsweringHead(nn.Module):
    """Head for question answering tasks."""

    def __init__(self, config):
        super().__init__()
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.intermediate = BigBirdIntermediate(config)
        self.output = BigBirdOutput(config)
        # config.num_labels logits per token (set to 2 by BigBirdForQuestionAnswering: start/end).
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_output):
        # dropout -> intermediate projection -> output layer (which combines its result with the
        # original encoder_output) -> per-token span logits.
        hidden_states = self.dropout(encoder_output)
        hidden_states = self.intermediate(hidden_states)
        hidden_states = self.output(hidden_states, encoder_output)
        hidden_states = self.qa_outputs(hidden_states)
        return hidden_states


@add_start_docstrings(
    """
    BigBird Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and
`span end logits`). """, BIG_BIRD_START_DOCSTRING, ) class BigBirdForQuestionAnswering(BigBirdPreTrainedModel): def __init__(self, config, add_pooling_layer=False): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.sep_token_id = config.sep_token_id self.bert = BigBirdModel(config, add_pooling_layer=add_pooling_layer) self.qa_classifier = BigBirdForQuestionAnsweringHead(config) self.init_weights() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="google/bigbird-base-trivia-itc", output_type=BigBirdForQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, question_lengths=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict seqlen = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1) if question_lengths is None and input_ids is not None: # assuming input_ids format: <cls> <question> <sep> context <sep> question_lengths = torch.argmax(input_ids.eq(self.sep_token_id).int(), dim=-1) + 1 question_lengths.unsqueeze_(1) logits_mask = None if question_lengths is not None: # setting lengths logits to `-inf` logits_mask = self.prepare_question_mask(question_lengths, seqlen) if token_type_ids is None: token_type_ids = (~logits_mask).long() logits_mask = logits_mask logits_mask[:, 0] = False logits_mask.unsqueeze_(2) outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_classifier(sequence_output) if logits_mask is not None: # removing question tokens from the competition logits = logits - logits_mask * 1e6 start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) 
total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return BigBirdForQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @staticmethod def prepare_question_mask(q_lengths: torch.Tensor, maxlen: int): # q_lengths -> (bz, 1) mask = torch.arange(0, maxlen).to(q_lengths.device) mask.unsqueeze_(0) # -> (1, maxlen) mask = mask < q_lengths return mask
# coding=utf-8 # Copyright 2021 Google Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch BigBird model. """ import math import os from dataclasses import dataclass from typing import Optional, Tuple import numpy as np import torch import torch.utils.checkpoint from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel, apply_chunking_to_forward from ...utils import logging from .configuration_big_bird import BigBirdConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "google/bigbird-roberta-base" _CONFIG_FOR_DOC = "BigBirdConfig" _TOKENIZER_FOR_DOC = "BigBirdTokenizer" BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/bigbird-roberta-base", "google/bigbird-roberta-large", "google/bigbird-base-trivia-itc", # See all BigBird models at https://huggingface.co/models?filter=big_bird ] _TRIVIA_QA_MAPPING = { "big_bird_attention": "attention/self", 
"output_layer_norm": "output/LayerNorm", "attention_output": "attention/output/dense", "output": "output/dense", "self_attention_layer_norm": "attention/output/LayerNorm", "intermediate": "intermediate/dense", "word_embeddings": "bert/embeddings/word_embeddings", "position_embedding": "bert/embeddings/position_embeddings", "type_embeddings": "bert/embeddings/token_type_embeddings", "embeddings": "bert/embeddings", "layer_normalization": "output/LayerNorm", "layer_norm": "LayerNorm", "trivia_qa_head": "qa_classifier", "dense": "intermediate/dense", "dense_1": "qa_outputs", } def load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=False): """Load tf checkpoints in a pytorch model.""" def load_tf_weights_bert(init_vars, tf_path): names = [] tf_weights = {} for name, shape in init_vars: array = tf.train.load_variable(tf_path, name) name = name.replace("bert/encoder/LayerNorm", "bert/embeddings/LayerNorm") logger.info(f"Loading TF weight {name} with shape {shape}") names.append(name) tf_weights[name] = array return names, tf_weights def load_tf_weights_trivia_qa(init_vars): names = [] tf_weights = {} for i, var in enumerate(init_vars): name_items = var.name.split("/") if "transformer_scaffold" in name_items[0]: layer_name_items = name_items[0].split("_") if len(layer_name_items) < 3: layer_name_items += [0] name_items[0] = f"bert/encoder/layer_{layer_name_items[2]}" name = "/".join([_TRIVIA_QA_MAPPING[x] if x in _TRIVIA_QA_MAPPING else x for x in name_items])[ :-2 ] # remove last :0 in variable if "self/attention/output" in name: name = name.replace("self/attention/output", "output") if i >= len(init_vars) - 2: name = name.replace("intermediate", "output") logger.info(f"Loading TF weight {name} with shape {var.shape}") array = var.value().numpy() names.append(name) tf_weights[name] = array return names, tf_weights try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires 
TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.saved_model.load(tf_path).variables if is_trivia_qa else tf.train.list_variables(tf_path) assert len(init_vars) > 0, "Loaded trained variables cannot be empty." pt_names = list(model.state_dict().keys()) if is_trivia_qa: names, tf_weights = load_tf_weights_trivia_qa(init_vars) else: names, tf_weights = load_tf_weights_bert(init_vars, tf_path) for txt_name in names: array = tf_weights[txt_name] name = txt_name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info(f"Skipping {'/'.join(name)}") continue pointer = model pt_name = [] for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") pt_name.append("weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") pt_name.append("bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "weight") pt_name.append("weight") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") pt_name.append("classifier") elif scope_names[0] == "transform": pointer = getattr(pointer, "transform") pt_name.append("transform") if ("bias" in name) or ("kernel" in name): pointer = getattr(pointer, "dense") pt_name.append("dense") elif ("beta" in name) or ("gamma" in name): pointer = getattr(pointer, "LayerNorm") pt_name.append("LayerNorm") else: try: pointer = getattr(pointer, scope_names[0]) 
pt_name.append(f"{scope_names[0]}") except AttributeError: logger.info(f"Skipping {m_name}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] pt_name.append(f"{num}") if m_name[-11:] == "_embeddings" or m_name == "embeddings": pointer = getattr(pointer, "weight") pt_name.append("weight") elif m_name == "kernel": array = np.transpose(array) try: if len(array.shape) > len(pointer.shape) and math.prod(array.shape) == math.prod(pointer.shape): # print(txt_name, array.shape) if ( txt_name.endswith("attention/self/key/kernel") or txt_name.endswith("attention/self/query/kernel") or txt_name.endswith("attention/self/value/kernel") ): array = array.transpose(1, 0, 2).reshape(pointer.shape) elif txt_name.endswith("attention/output/dense/kernel"): array = array.transpose(0, 2, 1).reshape(pointer.shape) else: array = array.reshape(pointer.shape) if pointer.shape != array.shape: raise ValueError( f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched of {txt_name}." 
) except AssertionError as e: e.args += (pointer.shape, array.shape) raise pt_weight_name = ".".join(pt_name) logger.info(f"Initialize PyTorch weight {pt_weight_name} from {txt_name}.") pointer.data = torch.from_numpy(array) tf_weights.pop(txt_name, None) pt_names.remove(pt_weight_name) logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.") logger.info(f"Weights not initialized in PyTorch model: {', '.join(pt_names)}.") return model class BigBirdEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__ def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) if version.parse(torch.__version__) > version.parse("1.6.0"): self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False, ) # End copy self.rescale_embeddings = config.rescale_embeddings self.hidden_size = config.hidden_size def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if input_ids is not None: 
input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if self.rescale_embeddings: inputs_embeds = inputs_embeds * (self.hidden_size ** 0.5) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.dropout(embeddings) embeddings = self.LayerNorm(embeddings) return embeddings class BigBirdSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, 
bias=config.use_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. 
# Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BigBirdModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class BigBirdBlockSparseAttention(nn.Module): def __init__(self, config, seed=None): super().__init__() self.max_seqlen = config.max_position_embeddings self.seed = seed if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.num_attention_heads = config.num_attention_heads self.num_random_blocks = config.num_random_blocks self.block_size = config.block_size self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, output_attentions=None, ): # Currently this `class` can't be used in decoder. 
batch_size, seqlen, _ = hidden_states.size() to_seq_length = from_seq_length = seqlen from_block_size = to_block_size = self.block_size assert from_seq_length % from_block_size == 0, "Query sided sequence length must be multiple of block size" assert to_seq_length % to_block_size == 0, "Key/Value sided sequence length must be multiple of block size" query_layer = self.transpose_for_scores(self.query(hidden_states)) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) context_layer, attention_probs = self.bigbird_block_sparse_attention( query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, self.num_attention_heads, self.num_random_blocks, self.attention_head_size, from_block_size, to_block_size, batch_size, from_seq_length, to_seq_length, seed=self.seed, plan_from_length=None, plan_num_rand_blocks=None, output_attentions=output_attentions, ) context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs @staticmethod def torch_bmm_nd(inp_1, inp_2, ndim=None): """Fast nd matrix multiplication""" # faster replacement of torch.einsum ("bhqk,bhkd->bhqd") return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:])).view( inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 1]) ) @staticmethod def torch_bmm_nd_transpose(inp_1, inp_2, ndim=None): """Fast nd matrix multiplication with transpose""" # faster replacement of torch.einsum (bhqd,bhkd->bhqk) return torch.bmm( inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:]).transpose(1, 2) ).view(inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 2])) def bigbird_block_sparse_attention( self, query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, 
        to_blocked_mask,
        n_heads,
        n_rand_blocks,
        attention_head_size,
        from_block_size,
        to_block_size,
        batch_size,
        from_seq_len,
        to_seq_len,
        seed,
        plan_from_length,
        plan_num_rand_blocks,
        output_attentions,
    ):
        # BigBird block-sparse attention as suggested in paper
        # ITC:
        #     global tokens: 2 x block_size
        #     window tokens: 3 x block_size
        #     random tokens: num_rand_tokens x block_size
        # ETC:
        #     global tokens: extra_globals_tokens + 2 x block_size
        #     window tokens: 3 x block_size
        #     random tokens: num_rand_tokens x block_size
        # Note:
        #     1) Currently, ETC is not supported.
        #     2) Window size is fixed to 3 blocks & it can be changed only by
        #     changing `block_size`.
        #     3) Number of global blocks are fixed (2 blocks here) & global tokens can be
        #     controlled only by `block_size`.

        # attention is calculated separately for q[0], q[1], q[2:-2], q[-2], q[-1] in order to use special trick of
        # shifting tokens (for calculating sliding attention) hence following code can be divided into 5 parts.

        if from_seq_len // from_block_size != to_seq_len // to_block_size:
            raise ValueError("Error the number of blocks needs to be same!")

        rsqrt_d = 1 / math.sqrt(attention_head_size)
        bsz = batch_size
        # large negative additive penalty applied to masked positions before softmax
        attn_mask_penalty = -10000.0

        # generate random attention and corresponding masks
        np.random.seed(seed)
        if from_seq_len in [1024, 3072, 4096]:  # old plans used in paper
            rand_attn = [
                self._bigbird_block_rand_mask(
                    self.max_seqlen, self.max_seqlen, from_block_size, to_block_size, n_rand_blocks, last_idx=1024
                )[: (from_seq_len // from_block_size - 2)]
                for _ in range(n_heads)
            ]
        else:
            if plan_from_length is None:
                plan_from_length, plan_num_rand_blocks = self._get_rand_attn_plan(
                    from_seq_len, from_block_size, n_rand_blocks
                )

            rand_attn = self._bigbird_block_rand_mask_with_head(
                from_seq_length=from_seq_len,
                to_seq_length=to_seq_len,
                from_block_size=from_block_size,
                to_block_size=to_block_size,
                num_heads=n_heads,
                plan_from_length=plan_from_length,
                plan_num_rand_blocks=plan_num_rand_blocks,
            )

        # stack per-head adjacency lists and broadcast over the batch dimension
        rand_attn = np.stack(rand_attn, axis=0)
        rand_attn = torch.tensor(rand_attn, device=query_layer.device, dtype=torch.long)
        rand_attn.unsqueeze_(0)
        rand_attn = torch.cat([rand_attn for _ in range(batch_size)], dim=0)

        rand_mask = self._create_rand_mask_from_inputs(
            from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size
        )

        # reshape q/k/v into blocks: [bsz, n_heads, num_blocks, block_size, head_dim]
        blocked_query_matrix = query_layer.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, -1)
        blocked_key_matrix = key_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)
        blocked_value_matrix = value_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)

        # preparing block for randn attn
        gathered_key = self.torch_gather_b2(blocked_key_matrix, rand_attn)
        gathered_key = gathered_key.view(
            bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1
        )  # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1]
        gathered_value = self.torch_gather_b2(blocked_value_matrix, rand_attn)
        gathered_value = gathered_value.view(
            bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1
        )  # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1]

        # 1st PART
        # 1st block (global block) attention scores
        # q[0] x (k[0], k[1], k[2], k[3], k[4] .... )

        # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len]
        first_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 0], key_layer, ndim=4)

        first_product = first_product * rsqrt_d
        first_product += (1.0 - to_mask) * attn_mask_penalty
        first_attn_weights = nn.functional.softmax(
            first_product, dim=-1
        )  # [bsz, n_heads, from_block_size, to_seq_len]

        # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1]
        first_context_layer = self.torch_bmm_nd(first_attn_weights, value_layer, ndim=4)
        first_context_layer.unsqueeze_(2)

        # 2nd PART
        # 2nd block attention scores
        # q[1] x (sliding_keys, random_keys, global_keys)
        # sliding key blocks -> 2nd, 3rd blocks
        # global key blocks -> 1st block

        second_key_mat = torch.cat(
            [
                blocked_key_matrix[:, :, 0],
                blocked_key_matrix[:, :, 1],
                blocked_key_matrix[:, :, 2],
                blocked_key_matrix[:, :, -1],
                gathered_key[:, :, 0],
            ],
            dim=2,
        )  # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]
        second_value_mat = torch.cat(
            [
                blocked_value_matrix[:, :, 0],
                blocked_value_matrix[:, :, 1],
                blocked_value_matrix[:, :, 2],
                blocked_value_matrix[:, :, -1],
                gathered_value[:, :, 0],
            ],
            dim=2,
        )  # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]

        # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
        second_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 1], second_key_mat, ndim=4)
        # padding mask over the concatenated key blocks (sequence part)
        second_seq_pad = torch.cat(
            [
                to_mask[:, :, :, : 3 * to_block_size],
                to_mask[:, :, :, -to_block_size:],
                to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),
            ],
            dim=3,
        )
        # padding mask over the concatenated key blocks (random part)
        second_rand_pad = torch.cat(
            [
                rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),
                rand_mask[:, :, 0],
            ],
            dim=3,
        )
        second_product = second_product * rsqrt_d
        # a position is masked if EITHER mask masks it, hence elementwise minimum
        second_product += (1.0 - torch.minimum(second_seq_pad, second_rand_pad)) * attn_mask_penalty
        second_attn_weights = nn.functional.softmax(
            second_product, dim=-1
        )  # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]

        # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1]
        second_context_layer = self.torch_bmm_nd(second_attn_weights, second_value_mat, ndim=4)
        second_context_layer.unsqueeze_(2)

        # 3rd PART
        # Middle blocks attention scores
        # q[-2:2] x (sliding_keys, random_keys, global_keys)
        # sliding attn is calculated using special trick of shifting tokens as discussed in paper
        # random keys are generated by taking random indices as per `rand_attn`
        # global keys -> 1st & last block

        exp_blocked_key_matrix = torch.cat(
            [blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1]], dim=3
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
        exp_blocked_value_matrix = torch.cat(
            [blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]],
            dim=3,
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
        middle_query_matrix = blocked_query_matrix[:, :, 2:-2]

        # sliding attention scores for q[-2:2]
        # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [b, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
        inner_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, exp_blocked_key_matrix, ndim=5)
        #     ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, 3*to_block_size]
        inner_band_product = inner_band_product * rsqrt_d

        # randn attention scores for q[-2:2]
        # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1]
        rand_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, gathered_key[:, :, 1:-1], ndim=5)
        #     ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size]
        rand_band_product = rand_band_product * rsqrt_d

        # Including 1st block (since it's global)
        first_band_product = torch.einsum(
            "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, 0]
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size]
        first_band_product = first_band_product * rsqrt_d

        # Including last block (since it's global)
        last_band_product = torch.einsum(
            "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, -1]
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size]
        last_band_product = last_band_product * rsqrt_d

        # masking padded tokens
        inner_band_product += (1.0 - band_mask) * attn_mask_penalty
        first_band_product += (1.0 - to_mask[:, :, :, :to_block_size].unsqueeze(3)) * attn_mask_penalty
        last_band_product += (1.0 - to_mask[:, :, :, -to_block_size:].unsqueeze(3)) * attn_mask_penalty
        rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * attn_mask_penalty

        # completing attention scores matrix for all q[-2:2]
        band_product = torch.cat(
            [first_band_product, inner_band_product, rand_band_product, last_band_product], dim=-1
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size]

        # safely doing softmax since attention matrix is completed
        attn_weights = nn.functional.softmax(
            band_product, dim=-1
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size]

        # contribution of sliding keys
        # [bsz, n_heads, m//from_block_size-4, from_block_size, 3*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
        context_layer = self.torch_bmm_nd(
            attn_weights[:, :, :, :, to_block_size : 4 * to_block_size], exp_blocked_value_matrix, ndim=5
        )
        #     ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]

        # adding contribution of random keys
        # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1]
        context_layer += self.torch_bmm_nd(
            attn_weights[:, :, :, :, 4 * to_block_size : -to_block_size], gathered_value[:, :, 1:-1], ndim=5
        )  # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]

        # adding contribution of global keys
        context_layer += torch.einsum(
            "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0]
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
        context_layer += torch.einsum(
            "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1]
        )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]

        # 4th PART
        # last 2nd token attention scores
        # q[-2] x (sliding_keys, random_keys, global_keys)
        # sliding key blocks -> last 3 blocks
        # global key block -> 1st block
        # random key block -> based on indices stored in `randn_attn`

        second_last_key_mat = torch.cat(
            [
                blocked_key_matrix[:, :, 0],
                blocked_key_matrix[:, :, -3],
                blocked_key_matrix[:, :, -2],
                blocked_key_matrix[:, :, -1],
                gathered_key[:, :, -1],
            ],
            dim=2,
        )  # [bsz, n_heads, (4+n_random_blocks)*to_block_size, -1]
        second_last_value_mat = torch.cat(
            [
                blocked_value_matrix[:, :, 0],
                blocked_value_matrix[:, :, -3],
                blocked_value_matrix[:, :, -2],
                blocked_value_matrix[:, :, -1],
                gathered_value[:, :, -1],
            ],
            dim=2,
        )  # [bsz, n_heads, (4+r)*to_block_size, -1]

        # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
        second_last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -2], second_last_key_mat, ndim=4)
        second_last_seq_pad = torch.cat(
            [
                to_mask[:, :, :, :to_block_size],
                to_mask[:, :, :, -3 * to_block_size :],
                to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),
            ],
            dim=3,
        )
        second_last_rand_pad = torch.cat(
            [
                rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),
                rand_mask[:, :, -1],
            ],
            dim=3,
        )
        second_last_product = second_last_product * rsqrt_d
        # combine both padding masks via elementwise minimum, as in the 2nd PART
        second_last_product += (1.0 - torch.minimum(second_last_seq_pad, second_last_rand_pad)) * attn_mask_penalty
        second_last_attn_weights = nn.functional.softmax(
            second_last_product, dim=-1
        )  # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]

        # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1]
        second_last_context_layer = self.torch_bmm_nd(second_last_attn_weights, second_last_value_mat, ndim=4)
        second_last_context_layer.unsqueeze_(2)

        # 5th PART
        # last block (global) attention scores
        # q[-1] x (k[0], k[1], k[2], k[3], .... )

        # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len]
        last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -1], key_layer, ndim=4)
        last_product = last_product * rsqrt_d
        last_product += (1.0 - to_mask) * attn_mask_penalty
        last_attn_weights = nn.functional.softmax(last_product, dim=-1)  # [bsz, n_heads, from_block_size, n]

        # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1]
        last_context_layer = self.torch_bmm_nd(last_attn_weights, value_layer, ndim=4)
        last_context_layer.unsqueeze_(2)

        # combining representations of all tokens
        context_layer = torch.cat(
            [first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer],
            dim=2,
        )
        context_layer = context_layer.view((bsz, n_heads, from_seq_len, -1)) * from_mask
        context_layer = torch.transpose(context_layer, 1, 2)

        # this is just for visualizing; forward pass doesn't depend on following code
        if output_attentions:
            # TODO(PVP): need to verify if below code is correct
            attention_probs = torch.zeros(
                bsz, n_heads, from_seq_len, to_seq_len, dtype=torch.float, device=context_layer.device
            )

            # 1st query block
            # corresponding to `first_context_layer`
            attention_probs[:, :, :from_block_size, :] = first_attn_weights  # all keys global

            # 2nd query block
            # corresponding to `second_context_layer`
            attention_probs[:, :, from_block_size : 2 * from_block_size, : 3 * to_block_size] = second_attn_weights[
                :, :, :, : 3 * to_block_size
            ]  # 1st three key blocks (global + sliding)
            attention_probs[:, :, from_block_size : 2 * from_block_size, -to_block_size:] = second_attn_weights[
                :, :, :, 3 * to_block_size : 4 * to_block_size
            ]  # last key block (global)
            # random keys
            for p1, i1, w1 in zip(range(bsz), rand_attn, second_attn_weights):
                # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
                for p2, i2, w2 in zip(range(n_heads), i1, w1):
                    # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
                    attn_probs_view = attention_probs.view(
                        bsz,
                        n_heads,
                        from_seq_len // from_block_size,
                        from_block_size,
                        to_seq_len // to_block_size,
                        to_block_size,
                    )
                    right_slice = w2[:, 4 * to_block_size :]
                    attn_probs_view[p1, p2, 1, :, i2[0]] = right_slice.view(
                        from_block_size, n_rand_blocks, to_block_size
                    )

            # Middle query blocks
            # corresponding to `context_layer`
            # sliding keys
            for q_idx in range(from_seq_len // from_block_size - 4):
                attn_probs_view = attention_probs.view(
                    bsz,
                    n_heads,
                    from_seq_len // from_block_size,
                    from_block_size,
                    to_seq_len // to_block_size,
                    to_block_size,
                )[:, :, 2:-2, :, 1:-1, :]
                right_slice = attn_weights[:, :, q_idx, :, to_block_size : 4 * to_block_size]
                attn_probs_view[:, :, q_idx, :, q_idx : q_idx + 3, :] = right_slice.view(
                    bsz, n_heads, from_block_size, 3, to_block_size
                )  # inner_band_product
            # global keys (corresponding to 1st key block)
            attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, :to_block_size] = attn_weights[
                :, :, :, :, :to_block_size
            ].view(
                bsz, n_heads, -1, to_block_size
            )  # first_band_product
            # global keys (corresponding to last key block)
            attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, -to_block_size:] = attn_weights[
                :, :, :, :, -to_block_size:
            ].view(
                bsz, n_heads, -1, to_block_size
            )  # last_band_product
            # random keys
            for p1, i1, w1 in zip(range(bsz), rand_attn, attn_weights):
                # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
                for p2, i2, w2 in zip(range(n_heads), i1, w1):
                    # p2, i2, w2 corresponds to head_dim i.e.
following operation is done for each heads for q_idx in range(1, len(i2) - 1): attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[q_idx - 1, :, 4 * to_block_size : -to_block_size] attn_probs_view[p1, p2, q_idx + 1, :, i2[q_idx]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # Second-last query block # corresponding to `second_last_context_layer` attention_probs[:, :, -2 * from_block_size : -from_block_size, :to_block_size] = second_last_attn_weights[ :, :, :, :to_block_size ] # 1st key block (global) attention_probs[ :, :, -2 * from_block_size : -from_block_size, -3 * to_block_size : ] = second_last_attn_weights[ :, :, :, to_block_size : 4 * to_block_size ] # last three blocks (global + sliding) # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, second_last_attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[:, 4 * to_block_size :] attn_probs_view[p1, p2, -2, :, i2[-1]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # last query block # corresponding to `last_context_layer` attention_probs[:, :, -from_block_size:, :] = last_attn_weights # all keys global else: attention_probs = None return context_layer, attention_probs @staticmethod def torch_gather_b2(params, indices): # this operation is equivalent to tf.gather when batch_dims=2 if params.shape[:2] != indices.shape[:2]: raise ValueError( f"Make sure that the first two dimensions of params and indices are identical, \ but they are params: {params.shape[:2]} vs. 
indices: {params.shape[:2]}" ) num_indices_to_gather = indices.shape[-2] * indices.shape[-1] num_indices_to_pick_from = params.shape[2] indices_shift = ( torch.arange(indices.shape[0] * indices.shape[1] * num_indices_to_gather, device=indices.device) // num_indices_to_gather * num_indices_to_pick_from ) flattened_indices = indices.view(-1) + indices_shift flattened_params = params.reshape(-1, params.shape[-2], params.shape[-1]) out_flattened = flattened_params.index_select(0, flattened_indices) out = out_flattened.reshape(params.shape[:2] + (num_indices_to_gather,) + params.shape[3:]) return out @staticmethod def _create_rand_mask_from_inputs( from_blocked_mask, to_blocked_mask, rand_attn, num_attention_heads, num_rand_blocks, batch_size, from_seq_length, from_block_size, ): """ Create 3D attention mask from a 2D tensor mask. Args: from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. rand_attn: [batch_size, num_attention_heads, from_seq_length//from_block_size-2, num_rand_blocks] num_attention_heads: int. Number of attention heads. num_rand_blocks: int. Number of random chunks per row. batch_size: int. Batch size for computation. from_seq_length: int. length of from sequence. from_block_size: int. size of block in from sequence. Returns: float Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2, from_block_size, num_rand_blocks*to_block_size]. 
""" num_windows = from_seq_length // from_block_size - 2 rand_mask = torch.stack([p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)]) rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size) rand_mask = torch.einsum("blq,bhlk->bhlqk", from_blocked_mask[:, 1:-1], rand_mask) return rand_mask @staticmethod def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks): """ Gives the plan of where to put random attention. Args: from_seq_length: int. length of from sequence. from_block_size: int. size of block in from sequence. num_rand_blocks: int. Number of random chunks per row. Returns: plan_from_length: ending location of from block plan_num_rand_blocks: number of random ending location for each block """ plan_from_length = [] plan_num_rand_blocks = [] if (2 * num_rand_blocks + 5) < (from_seq_length // from_block_size): plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(0) elif (num_rand_blocks + 5) < (from_seq_length // from_block_size): plan_from_length.append(int((num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks // 2) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks - (num_rand_blocks // 2)) else: plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks) return plan_from_length, plan_num_rand_blocks @staticmethod def _bigbird_block_rand_mask( from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1 ): """ Create adjacency list of random attention. Args: from_seq_length: int. length of from sequence. to_seq_length: int. length of to sequence. from_block_size: int. size of block in from sequence. to_block_size: int. size of block in to sequence. num_rand_blocks: int. Number of random chunks per row. 
last_idx: if -1 then num_rand_blocks blocks chosen anywhere in to sequence, if positive then num_rand_blocks blocks chosen only up to last_idx. Returns: adjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks """ # using this method when from_seq_length in [1024, 3072, 4096] assert ( from_seq_length // from_block_size == to_seq_length // to_block_size ), "Error the number of blocks needs to be same!" rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32) middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32) last = to_seq_length // to_block_size - 1 if last_idx > (2 * to_block_size): last = (last_idx // to_block_size) - 1 r = num_rand_blocks # shorthand for i in range(1, from_seq_length // from_block_size - 1): start = i - 2 end = i if i == 1: rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r] elif i == 2: rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r] elif i == from_seq_length // from_block_size - 3: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r] # Missing -3: should have been sliced till last-3 elif i == from_seq_length // from_block_size - 2: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r] # Missing -4: should have been sliced till last-4 else: if start > last: start = last rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r] elif (end + 1) == last: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r] else: rand_attn[i - 1, :] = np.random.permutation( np.concatenate((middle_seq[:start], middle_seq[end + 1 : last])) )[:r] return rand_attn def _bigbird_block_rand_mask_with_head( self, from_seq_length, to_seq_length, from_block_size, to_block_size, num_heads, plan_from_length, plan_num_rand_blocks, window_block_left=1, window_block_right=1, global_block_top=1, global_block_bottom=1, global_block_left=1, global_block_right=1, ): """ Create adjacency list of random attention. 
Args: from_seq_length: int. length of from sequence. to_seq_length: int. length of to sequence. from_block_size: int. size of block in from sequence. to_block_size: int. size of block in to sequence. num_heads: int. total number of heads. plan_from_length: list. plan from length where num_random_blocks are chosen from. plan_num_rand_blocks: list. number of rand blocks within the plan. window_block_left: int. number of blocks of window to left of a block. window_block_right: int. number of blocks of window to right of a block. global_block_top: int. number of blocks at the top. global_block_bottom: int. number of blocks at the bottom. global_block_left: int. Number of blocks globally used to the left. global_block_right: int. Number of blocks globally used to the right. Returns: adjacency list of size num_head where each element is of size from_seq_length//from_block_size-2 by num_rand_blocks """ # using this method when from_seq_length not in [1024, 3072, 4096] assert ( from_seq_length // from_block_size == to_seq_length // to_block_size ), "Error the number of blocks needs to be same!" assert from_seq_length in plan_from_length, "Error from sequence length not in plan!" 
# Total number of blocks in the mmask num_blocks = from_seq_length // from_block_size # Number of blocks per plan plan_block_length = np.array(plan_from_length) // from_block_size # till when to follow plan max_plan_idx = plan_from_length.index(from_seq_length) # Random Attention adjacency list rand_attn = [ np.zeros((num_blocks, np.sum(plan_num_rand_blocks[: max_plan_idx + 1])), dtype=np.int32) for i in range(num_heads) ] # We will go iteratively over the plan blocks and pick random number of # Attention blocks from the legally allowed blocks for plan_idx in range(max_plan_idx + 1): rnd_r_cnt = 0 if plan_idx > 0: # set the row for all from_blocks starting from 0 to # plan_block_length[plan_idx-1] # column indx start fromm plan_block_length[plan_idx-1] and ends at # plan_block_length[plan_idx] if plan_num_rand_blocks[plan_idx] > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx])) curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1])) for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]): for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=plan_block_length[plan_idx - 1], to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) for pl_id in range(plan_idx): if plan_num_rand_blocks[pl_id] == 0: continue for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]): rnd_r_cnt = 0 to_start_block_id = 0 if pl_id > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id])) to_start_block_id = plan_block_length[pl_id - 1] curr_r_cnt = int(np.sum(plan_num_rand_blocks[: pl_id + 1])) for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention( block_id=blk_rw_idx, 
to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[pl_id], num_rand_blocks=plan_num_rand_blocks[pl_id], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) if plan_num_rand_blocks[plan_idx] == 0: continue curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1])) from_start_block_id = global_block_top to_start_block_id = 0 if plan_idx > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx])) from_start_block_id = plan_block_length[plan_idx - 1] to_start_block_id = plan_block_length[plan_idx - 1] for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]): for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) for nh in range(num_heads): rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :] return rand_attn @staticmethod def _get_single_block_row_attention( block_id, to_start_block_id, to_end_block_id, num_rand_blocks, window_block_left=1, window_block_right=1, global_block_left=1, global_block_right=1, ): """ For a single row block get random row attention. Args: block_id: int. block id of row. to_start_block_id: int. random attention column start id. to_end_block_id: int. random attention column end id. num_rand_blocks: int. number of random blocks to be selected. window_block_left: int. number of blocks of window to left of a block. window_block_right: int. number of blocks of window to right of a block. global_block_left: int. Number of blocks globally used to the left. global_block_right: int. 
Number of blocks globally used to the right. Returns: row containing the random attention vector of size num_rand_blocks. """ # list of to_blocks from which to choose random attention to_block_list = np.arange(to_start_block_id, to_end_block_id, dtype=np.int32) # permute the blocks perm_block = np.random.permutation(to_block_list) # illegal blocks for the current block id, using window illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1)) # Add blocks at the start and at the end illegal_blocks.extend(list(range(global_block_left))) illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id))) # The second from_block cannot choose random attention on second last to_block if block_id == 1: illegal_blocks.append(to_end_block_id - 2) # The second last from_block cannot choose random attention on second to_block if block_id == to_end_block_id - 2: illegal_blocks.append(1) selected_random_blokcs = [] for i in range(to_end_block_id - to_start_block_id): if perm_block[i] not in illegal_blocks: selected_random_blokcs.append(perm_block[i]) if len(selected_random_blokcs) == num_rand_blocks: break return np.array(selected_random_blokcs, dtype=np.int32) # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BigBird class BigBirdSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BigBirdAttention(nn.Module): def __init__(self, config, seed=None): super().__init__() self.attention_type = config.attention_type self.config = config self.seed = seed if 
self.config.attention_type == "original_full": self.self = BigBirdSelfAttention(config) elif self.config.attention_type == "block_sparse": self.self = BigBirdBlockSparseAttention(config, seed) else: raise ValueError( f"attention_type can either be original_full or block_sparse, but is {self.config.attention_type}" ) self.output = BigBirdSelfOutput(config) def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value if value == "original_full": # copy all weights to new full attention class attn_weights = BigBirdSelfAttention(self.config) else: # copy all weights to new sparse attention class attn_weights = BigBirdBlockSparseAttention(self.config, self.seed) attn_weights.query = self.self.query attn_weights.value = self.self.value attn_weights.key = self.self.key self.self = attn_weights self.attention_type = value if not self.training: self.self.eval() def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, # block_sparse config band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, ): if self.attention_type == "original_full": self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) else: assert ( encoder_hidden_states is None ), "BigBird cannot be used as a decoder when config.attention_type != 'original_full'" self_outputs = self.self( hidden_states, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, output_attentions ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if 
we output them
        return outputs


# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BigBird
class BigBirdIntermediate(nn.Module):
    # Position-wise feed-forward "expand" half: hidden_size -> intermediate_size, then activation.
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            # activation given by name -> resolve the callable from the registry
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BigBird
class BigBirdOutput(nn.Module):
    # Feed-forward "contract" half: intermediate_size -> hidden_size, dropout, residual add + LayerNorm.
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        # residual connection with `input_tensor` before normalization
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BigBirdLayer(nn.Module):
    """One transformer layer: (self-)attention, optional cross-attention, feed-forward."""

    def __init__(self, config, seed=None):
        super().__init__()
        self.config = config
        self.attention_type = config.attention_type
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        # `seed` is forwarded to the block-sparse attention (used for random-block selection there)
        self.attention = BigBirdAttention(config, seed=seed)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
            self.crossattention = BigBirdAttention(config)
        self.intermediate = BigBirdIntermediate(config)
        self.output = BigBirdOutput(config)

    def set_attention_type(self, value: str):
        # Switch between "original_full" and "block_sparse" attention, propagating to sub-modules.
        if value not in ["original_full", "block_sparse"]:
            raise ValueError(
                f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
            )
        # attention type is already correctly set
        if value == self.attention_type:
            return
        self.attention_type = value
        self.attention.set_attention_type(value)

        if self.add_cross_attention:
            self.crossattention.set_attention_type(value)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        band_mask=None,
        from_mask=None,
        to_mask=None,
        blocked_encoder_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_value=self_attn_past_key_value,
            output_attentions=output_attentions,
            band_mask=band_mask,
            from_mask=from_mask,
            to_mask=to_mask,
            # the same blocked mask serves as both "from" and "to" side for block-sparse attention
            from_blocked_mask=blocked_encoder_mask,
            to_blocked_mask=blocked_encoder_mask,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with \
cross-attention layers by setting `config.add_cross_attention=True`"
                )

            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        # chunked feed-forward over the sequence dimension to bound peak memory
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        # One feed-forward pass on a chunk of the sequence (see apply_chunking_to_forward above).
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output


class BigBirdEncoder(nn.Module):
    """Stack of `num_hidden_layers` BigBirdLayer modules; each layer's index seeds its sparse attention."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.attention_type = config.attention_type

        self.layer = nn.ModuleList(
            [BigBirdLayer(config, seed=layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )

    def set_attention_type(self, value: str):
        # Propagate the attention-type switch to every layer.
        if value not in ["original_full", "block_sparse"]:
            raise ValueError(
                f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
            )
        # attention type is already correctly set
        if value == self.attention_type:
            return
        self.attention_type = value
        for layer in self.layer:
            layer.set_attention_type(value)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        band_mask=None,
        from_mask=None,
        to_mask=None,
        blocked_encoder_mask=None,
        return_dict=True,
    ):
        # Accumulators stay None unless the corresponding output was requested.
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if getattr(self.config, "gradient_checkpointing", False) and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting " "`use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, band_mask, from_mask, to_mask, blocked_encoder_mask, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, band_mask, from_mask, to_mask, blocked_encoder_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with 
Bert->BigBird class BigBirdPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->BigBird class BigBirdLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BigBirdPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->BigBird class BigBirdOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BigBirdLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores # Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->BigBird class BigBirdOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = 
self.seq_relationship(pooled_output) return seq_relationship_score # Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->BigBird class BigBirdPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = BigBirdLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class BigBirdPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BigBirdConfig load_tf_weights = load_tf_weights_in_big_bird base_model_prefix = "bert" _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) BIG_BIRD_START_DOCSTRING = r""" This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.BigBirdConfig`): Model configuration class with all the parameters of the model. 
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
            weights.
"""

BIG_BIRD_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using :class:`transformers.BigBirdTokenizer`. See
            :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for
            details.

            `What are input IDs? <../glossary.html#input-ids>`__
        attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
            Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            `What are attention masks? <../glossary.html#attention-mask>`__
        token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
            1]``:

            - 0 corresponds to a `sentence A` token,
            - 1 corresponds to a `sentence B` token.

            `What are token type IDs? <../glossary.html#token-type-ids>`_
        position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
            config.max_position_embeddings - 1]``.

            `What are position IDs? <../glossary.html#position-ids>`_
        head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
            Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        output_attentions (:obj:`bool`, `optional`):
            Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
            tensors for more detail.
        output_hidden_states (:obj:`bool`, `optional`):
            Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
            more detail.
        return_dict (:obj:`bool`, `optional`):
            Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""


@dataclass
class BigBirdForPreTrainingOutput(ModelOutput):
    """
    Output type of :class:`~transformers.BigBirdForPreTraining`.

    Args:
        loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction
            (classification) loss.
        prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
            sequence_length, sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    # All fields optional; unset entries stay None when the corresponding output was not requested.
    loss: Optional[torch.FloatTensor] = None
    prediction_logits: torch.FloatTensor = None
    seq_relationship_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class BigBirdForQuestionAnsweringModelOutput(ModelOutput):
    """
    Base class for outputs of question answering models.

    Args:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
            Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
        start_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
            Span-start scores (before SoftMax).
        end_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
            Span-end scores (before SoftMax).
        pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 1)`):
            pooler output from BigBigModel
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
            sequence_length, sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[torch.FloatTensor] = None
    start_logits: torch.FloatTensor = None
    end_logits: torch.FloatTensor = None
    pooler_output: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


@add_start_docstrings(
    "The bare BigBird Model transformer outputting raw hidden-states without any specific head on top.",
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdModel(BigBirdPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

    To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
    input to the forward pass.
""" def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.attention_type = self.config.attention_type self.config = config self.block_size = self.config.block_size self.embeddings = BigBirdEmbeddings(config) self.encoder = BigBirdEncoder(config) if add_pooling_layer: self.pooler = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() else: self.pooler = None self.activation = None if self.attention_type != "original_full" and config.add_cross_attention: logger.warning( "When using `BigBirdForCausalLM` as decoder, then `attention_type` must be `original_full`. Setting `attention_type=original_full`" ) self.set_attention_type("original_full") self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value self.encoder.set_attention_type(value) @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last 
layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # in order to use block_sparse attention, sequence_length has to be at least # bigger than all global attentions: 2 * block_size # + sliding tokens: 3 * block_size # + random tokens: 2 * num_random_blocks * block_size max_tokens_to_attend = (5 + 2 * self.config.num_random_blocks) * self.config.block_size if self.attention_type == "block_sparse" and seq_length <= max_tokens_to_attend: # change attention_type from block_sparse to original_full sequence_length = 
input_ids.size(1) if input_ids is not None else inputs_embeds.size(1) logger.warning( "Attention type 'block_sparse' is not possible if sequence_length: " f"{sequence_length} <= num global tokens: 2 * config.block_size " "+ min. num sliding tokens: 3 * config.block_size " "+ config.num_random_blocks * config.block_size " "+ additional buffer: config.num_random_blocks * config.block_size " f"= {max_tokens_to_attend} with config.block_size " f"= {self.config.block_size}, config.num_random_blocks " f"= {self.config.num_random_blocks}." "Changing attention type to 'original_full'..." ) self.set_attention_type("original_full") if self.attention_type == "block_sparse": ( padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds, ) = self._pad_to_block_size( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, pad_token_id=self.config.pad_token_id, ) else: padding_len = 0 if self.attention_type == "block_sparse": blocked_encoder_mask, band_mask, from_mask, to_mask = self.create_masks_for_block_sparse_attn( attention_mask, self.block_size ) extended_attention_mask = None elif self.attention_type == "original_full": blocked_encoder_mask = None band_mask = None from_mask = None to_mask = None # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
            extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
                attention_mask, input_shape, device
            )
        else:
            raise ValueError(
                f"attention_type can either be original_full or block_sparse, but is {self.attention_type}"
            )

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )

        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            band_mask=band_mask,
            from_mask=from_mask,
            to_mask=to_mask,
            blocked_encoder_mask=blocked_encoder_mask,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        # pooled output comes from the first ([CLS]) token, which padding below does not touch
        pooler_output = self.activation(self.pooler(sequence_output[:, 0, :])) if (self.pooler is not None) else None

        # undo padding
        if padding_len > 0:
            # unpad `sequence_output` because the calling function is expecting a length == input_ids.size(1)
            sequence_output = sequence_output[:, :-padding_len]

        if not return_dict:
            return (sequence_output, pooler_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooler_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )

    @staticmethod
    def create_masks_for_block_sparse_attn(attention_mask: torch.Tensor, block_size: int):
        # Derive the block-structured masks (blocked, band, from, to) used by sparse attention.
        batch_size, seq_length = attention_mask.size()
        assert (
            seq_length % block_size == 0
        ), f"Sequence length must be multiple of block size, but sequence length is {seq_length}, while block size is {block_size}."

        def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask):
            """
            Create 3D attention mask from a 2D tensor mask.

            Args:
                from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size].
                to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size].

            Returns:
                float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size,
                3*to_block_size].
            """
            # concatenate each block's left/center/right neighbors along the token axis
            exp_blocked_to_pad = torch.cat(
                [to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], dim=2
            )
            band_mask = torch.einsum("blq,blk->blqk", from_blocked_mask[:, 2:-2], exp_blocked_to_pad)
            band_mask.unsqueeze_(1)
            return band_mask

        blocked_encoder_mask = attention_mask.view(batch_size, seq_length // block_size, block_size)
        band_mask = create_band_mask_from_inputs(blocked_encoder_mask, blocked_encoder_mask)

        from_mask = attention_mask.view(batch_size, 1, seq_length, 1)
        to_mask = attention_mask.view(batch_size, 1, 1, seq_length)

        return blocked_encoder_mask, band_mask, from_mask, to_mask

    def _pad_to_block_size(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor,
        token_type_ids: torch.Tensor,
        position_ids: torch.Tensor,
        inputs_embeds: torch.Tensor,
        pad_token_id: int,
    ):
        """A helper function to pad tokens and mask to work with implementation of BigBird block-sparse attention."""
        # padding
        block_size = self.config.block_size

        input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape
        batch_size, seq_len = input_shape[:2]

        # amount needed to reach the next multiple of block_size (0 when already aligned)
        padding_len = (block_size - seq_len % block_size) % block_size
        if padding_len > 0:
            logger.info(
                f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
                f"`config.block_size`: {block_size}"
            )
            if input_ids is not None:
                input_ids = nn.functional.pad(input_ids, (0, padding_len), value=pad_token_id)
            if position_ids is not None:
                # pad with position_id = pad_token_id as in modeling_bigbird.BigBirdEmbeddings
                position_ids = nn.functional.pad(position_ids, (0, padding_len), value=pad_token_id)
            if inputs_embeds is not None:
                # build pad-token embeddings so inputs_embeds can be extended consistently
                input_ids_padding = inputs_embeds.new_full(
                    (batch_size, padding_len),
                    self.config.pad_token_id,
                    dtype=torch.long,
                )
                inputs_embeds_padding = self.embeddings(input_ids_padding)
                inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2)

            attention_mask = nn.functional.pad(
                attention_mask, (0, padding_len), value=False
            )  # no attention on
the padding tokens
            token_type_ids = nn.functional.pad(token_type_ids, (0, padding_len), value=0)  # pad with token_type_id = 0

        return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds


class BigBirdForPreTraining(BigBirdPreTrainedModel):
    """BigBird with both pre-training heads: masked-LM over tokens and NSP over the pooled output."""

    def __init__(self, config):
        super().__init__(config)

        # pooling layer is required because the NSP head consumes the pooled output
        self.bert = BigBirdModel(config, add_pooling_layer=True)
        self.cls = BigBirdPreTrainingHeads(config)

        self.init_weights()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BigBirdForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
            Labels for computing the next sequence prediction (classification) loss. If specified, nsp loss will be
            added to masked_lm loss. Input should be a sequence pair (see :obj:`input_ids` docstring) Indices should be
            in ``[0, 1]``:

            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
            Used to hide legacy arguments that have been deprecated.

        Returns:

        Example::

            >>> from transformers import BigBirdTokenizer, BigBirdForPreTraining
            >>> import torch

            >>> tokenizer = BigBirdTokenizer.from_pretrained('bigbird-roberta-base')
            >>> model = BigBirdForPreTraining.from_pretrained('bigbird-roberta-base')

            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)

            >>> prediction_logits = outputs.prediction_logits
            >>> seq_relationship_logits = outputs.seq_relationship_logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        total_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            total_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        # NSP loss is only added when the MLM loss exists (loss_fct is defined in that branch)
        if next_sentence_label is not None and total_loss is not None:
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = total_loss + next_sentence_loss

        if not return_dict:
            output = (prediction_scores, seq_relationship_score) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return BigBirdForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings("""BigBird Model with a `language modeling` head on top.
""", BIG_BIRD_START_DOCSTRING) class BigBirdForMaskedLM(BigBirdPreTrainedModel): def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning( "If you want to use `BigBirdForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.bert = BigBirdModel(config) self.cls = BigBirdOnlyMLMHead(config) self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs): input_shape = input_ids.shape effective_batch_size = input_shape[0] # add a dummy token assert self.config.pad_token_id is not None, "The PAD token should be defined for generation" attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1) dummy_token = torch.full( (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device ) input_ids = torch.cat([input_ids, dummy_token], dim=1) return {"input_ids": input_ids, "attention_mask": attention_mask} @add_start_docstrings( """BigBird Model with a `language modeling` head on top for CLM fine-tuning. 
""", BIG_BIRD_START_DOCSTRING ) class BigBirdForCausalLM(BigBirdPreTrainedModel): _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning("If you want to use `BigBirdForCausalLM` as a standalone, add `is_decoder=True.`") self.bert = BigBirdModel(config) self.cls = BigBirdOnlyMLMHead(config) self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]``. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). 
Returns: Example:: >>> from transformers import BigBirdTokenizer, BigBirdForCausalLM, BigBirdConfig >>> import torch >>> tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base') >>> config = BigBirdConfig.from_pretrained("google/bigbird-base") >>> config.is_decoder = True >>> model = BigBirdForCausalLM.from_pretrained('google/bigbird-roberta-base', config=config) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) lm_loss = None if labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape # if model is used as a decoder in 
encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)

        # cut decoder_input_ids if past is used
        # (only the last token is needed once cached key/values exist)
        if past is not None:
            input_ids = input_ids[:, -1:]

        return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}

    def _reorder_cache(self, past, beam_idx):
        # Reorder the cached states so they follow the selected beams during
        # beam search. Only the first two entries of each layer's tuple (the
        # cached key and value tensors) carry a batch dimension that must be
        # re-indexed; any remaining entries are passed through unchanged.
        reordered_past = ()
        for layer_past in past:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
            )
        return reordered_past


class BigBirdClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # Fall back to the generic hidden dropout probability when no
        # classifier-specific dropout is configured.
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
        self.config = config

    def forward(self, features, **kwargs):
        # features: sequence output of the encoder; only the first position
        # is used for classification.
        x = features[:, 0, :]  # take <s> token (equiv. to [CLS])
        x = self.dropout(x)
        x = self.dense(x)
        x = ACT2FN[self.config.hidden_act](x)
        x = self.dropout(x)
        x = self.out_proj(x)
        return x


@add_start_docstrings(
    """
    BigBird Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
""", BIG_BIRD_START_DOCSTRING, ) class BigBirdForSequenceClassification(BigBirdPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.bert = BigBirdModel(config) self.classifier = BigBirdClassificationHead(config) self.init_weights() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ BigBird Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", BIG_BIRD_START_DOCSTRING, ) class BigBirdForMultipleChoice(BigBirdPreTrainedModel): def __init__(self, config): super().__init__(config) self.bert = BigBirdModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.init_weights() @add_start_docstrings_to_model_forward( BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. 
(See :obj:`input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ BigBird Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", BIG_BIRD_START_DOCSTRING, ) class BigBirdForTokenClassification(BigBirdPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert = BigBirdModel(config) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class BigBirdForQuestionAnsweringHead(nn.Module): """Head for question answering tasks.""" def __init__(self, config): super().__init__() self.dropout = nn.Dropout(config.hidden_dropout_prob) self.intermediate = BigBirdIntermediate(config) self.output = BigBirdOutput(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) def forward(self, encoder_output): hidden_states = self.dropout(encoder_output) hidden_states = self.intermediate(hidden_states) hidden_states = self.output(hidden_states, encoder_output) hidden_states = self.qa_outputs(hidden_states) return hidden_states @add_start_docstrings( """ BigBird Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and 
`span end logits`). """, BIG_BIRD_START_DOCSTRING, ) class BigBirdForQuestionAnswering(BigBirdPreTrainedModel): def __init__(self, config, add_pooling_layer=False): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.sep_token_id = config.sep_token_id self.bert = BigBirdModel(config, add_pooling_layer=add_pooling_layer) self.qa_classifier = BigBirdForQuestionAnsweringHead(config) self.init_weights() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="google/bigbird-base-trivia-itc", output_type=BigBirdForQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, question_lengths=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict seqlen = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1) if question_lengths is None and input_ids is not None: # assuming input_ids format: <cls> <question> <sep> context <sep> question_lengths = torch.argmax(input_ids.eq(self.sep_token_id).int(), dim=-1) + 1 question_lengths.unsqueeze_(1) logits_mask = None if question_lengths is not None: # setting lengths logits to `-inf` logits_mask = self.prepare_question_mask(question_lengths, seqlen) if token_type_ids is None: token_type_ids = (~logits_mask).long() logits_mask = logits_mask logits_mask[:, 0] = False logits_mask.unsqueeze_(2) outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_classifier(sequence_output) if logits_mask is not None: # removing question tokens from the competition logits = logits - logits_mask * 1e6 start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) 
            # Average the start- and end-position losses.
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return BigBirdForQuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    @staticmethod
    def prepare_question_mask(q_lengths: torch.Tensor, maxlen: int):
        # q_lengths -> (bz, 1)
        # Returns a boolean mask of shape (bz, maxlen) that is True for the
        # first q_length positions of each row, i.e. the question tokens,
        # given the <cls> <question> <sep> context <sep> layout assumed in
        # forward().
        mask = torch.arange(0, maxlen).to(q_lengths.device)
        mask.unsqueeze_(0)  # -> (1, maxlen)
        mask = mask < q_lengths  # broadcast against (bz, 1) -> (bz, maxlen)
        return mask
import sys
import os


def get_or(d, k, e):
    """Return ``d[k]`` if the key is present, otherwise the default ``e``.

    Fix: the original body evaluated ``d[k]`` / ``e`` without returning
    them, so the function always returned ``None`` — every argument was
    rendered as "switch/flag" and every short switch printed as "None".
    """
    return d.get(k, e)


def print_args(c):
    """Render the ``args`` section of a (sub)command spec as a markdown table."""
    if 'args' not in c:
        return
    print()
    print("**Arguments**")
    print("""
|Name|Switch|Kind|Multiple|Description|
|----|------|----|--------|-----------|""")
    for entry in c['args']:
        for name, arg in entry.items():
            # An arg that takes a value is an "argument"; otherwise it is a
            # boolean switch/flag.
            kind = "argument" if get_or(arg, 'takes_value', False) else "switch/flag"
            multiple = "yes" if get_or(arg, 'multiple', False) else "no"
            # Portability fix: the original nested double quotes inside a
            # double-quoted f-string, which is a SyntaxError before 3.12.
            print(f"|{name}|{get_or(arg, 'short', '-')}|{kind}|{multiple}|{arg['about'].strip()}|")


def print_commands(c, pfx=""):
    """Render the ``subcommands`` of a command as a markdown table of anchors."""
    if 'subcommands' not in c:
        return
    print()
    print("**Subcommands**")
    print("""
|Command|Description|
|-------|-----------|""")
    for sub in c['subcommands']:
        for name, command in sub.items():
            print(f"|[{name}](#{pfx}{name})|{command['about'].strip()}|")


def print_subcommand(p, c):
    """Recursively document one subcommand.

    :param p: list of ancestor command names (path from the cli root)
    :param c: single-entry mapping ``{name: command-spec}``
    """
    for name, command in c.items():
        print()
        heading = '#' * (len(p) + 3)
        if len(p) == 0:
            print(f"{heading} **{name}**")
        else:
            print(f"{heading} {' '.join(p)} **{name}**")
        subp = p.copy()
        subp.append(name)
        # Collect positional arguments (those with no -s/--long form) for
        # the usage line; required ones in <>, optional ones in [<>].
        args = []
        if 'args' in command:
            for cmd_args in command['args']:
                for arg_name, arg in cmd_args.items():
                    if 'short' not in arg and 'long' not in arg:
                        if get_or(arg, 'required', False):
                            args.append(f"<{arg_name}>")
                        else:
                            args.append(f"[<{arg_name}>]")
        print()
        print(f"{command['about']}")
        print()
        if 'subcommands' not in command:
            # Leaf command: emit a usage line.
            print("**Usage**")
            print()
            print("```")
            if args:
                print(f"tremor {' '.join(subp)} {' '.join(args)}")
            else:
                print(f"tremor {' '.join(subp)}")
            print("```")
        print_args(command)
        print_commands(command, '-'.join(subp) + '-')
        if 'subcommands' in command:
            for s in command['subcommands']:
                print_subcommand(subp, s)


def main():
    """Read the yaml cli spec named in ``sys.argv[1]`` and print markdown docs."""
    # Imported lazily so the pure formatting helpers above are importable
    # (and testable) without PyYAML installed.
    import yaml

    with open(sys.argv[1], 'r') as stream:
        try:
            y = yaml.safe_load(stream)
            print(f"# Tremor cli v{y['version']}")
            print()
            print(y['about'])
            print()
            print("""
## Scope

This document summarises the `tremor` cli commands.

## Audience

Tremor operators and developers

## General flags and switches
""")
            print_args(y)
            print()
            print("## Commands")
            print_commands(y)
            for c in y['subcommands']:
                print_subcommand([], c)
        except yaml.YAMLError as exc:
            print(exc)


if __name__ == "__main__":
    main()
import sys
import os


def get_or(d, k, e):
    """Return ``d[k]`` when the key exists, otherwise the default ``e``.

    Fix: the original ``if``/``else`` evaluated ``d[k]`` and ``e`` without
    ``return``, so the function always returned ``None``; as a result the
    Kind column was always "switch/flag" and the Switch column "None".
    """
    if k in d:
        return d[k]
    else:
        return e


def print_args(c):
    """Render the ``args`` section of a (sub)command spec as a markdown table."""
    if 'args' in c:
        print()
        print("**Arguments**")
        args = c['args']
        print("""
|Name|Switch|Kind|Multiple|Description|
|----|------|----|--------|-----------|""")
        for a in args:
            for name, arg in a.items():
                # Value-taking entries are arguments; the rest are flags.
                if get_or(arg, 'takes_value', False) == True:
                    kind = "argument"
                else:
                    kind = "switch/flag"
                if 'multiple' in arg and arg['multiple'] == True:
                    multiple = "yes"
                else:
                    multiple = "no"
                print(
                    f"|{name}|{get_or(arg, 'short', '-')}|{kind}|{multiple}|{arg['about'].strip()}|")


def print_commands(c, pfx=""):
    """Render the ``subcommands`` of a command as a markdown table of anchors."""
    if 'subcommands' in c:
        subcommands = c['subcommands']
        print()
        print("**Subcommands**")
        print("""
|Command|Description|
|-------|-----------|""")
        for sub in subcommands:
            for name, command in sub.items():
                print(
                    f"|[{name}](#{pfx}{name})|{command['about'].strip()}|")


def print_subcommand(p, c):
    """Recursively document one subcommand.

    :param p: list of ancestor command names (path from the cli root)
    :param c: single-entry mapping ``{name: command-spec}``
    """
    for name, command in c.items():
        print()
        if len(p) == 0:
            print(f"{'#' * (len(p) + 3)} **{name}**")
        else:
            print(f"{'#' * (len(p) + 3)} {' '.join(p)} **{name}**")
        subp = p.copy()
        subp.append(name)
        # Positional arguments (no -s/--long form) feed the usage line.
        args = []
        if 'args' in command:
            for cmd_args in command['args']:
                for arg_name, arg in cmd_args.items():
                    if not 'short' in arg and not 'long' in arg:
                        if get_or(arg, 'required', False):
                            args.append(f"<{arg_name}>")
                        else:
                            args.append(f"[<{arg_name}>]")
        print()
        print(f"{command['about']}")
        print()
        if not 'subcommands' in command:
            print("**Usage**")
            print()
            print("```")
            if len(args) > 0:
                print(f"tremor {' '.join(subp)} {' '.join(args)}")
            else:
                print(f"tremor {' '.join(subp)}")
            print("```")
        print_args(command)
        print_commands(command, '-'.join(subp) + '-')
        if 'subcommands' in command:
            for s in command['subcommands']:
                print_subcommand(subp, s)


def main():
    """Read the yaml cli spec named in ``sys.argv[1]`` and print markdown docs."""
    # Imported lazily so the formatting helpers above are importable
    # (and testable) without PyYAML installed.
    import yaml

    with open(sys.argv[1], 'r') as stream:
        try:
            y = yaml.safe_load(stream)
            print(f"# Tremor cli v{y['version']}")
            print()
            print(y['about'])
            print()
            print("""
## Scope

This document summarises the `tremor` cli commands.

## Audience

Tremor operators and developers

## General flags and switches
""")
            print_args(y)
            print()
            print("## Commands")
            print_commands(y)
            for c in y['subcommands']:
                print_subcommand([], c)
        except yaml.YAMLError as exc:
            print(exc)


if __name__ == "__main__":
    main()
def get_create_database_query(db_name: str):
    """Build the query that creates (or replaces) a database."""
    return f'CREATE or REPLACE database {db_name}'


def get_drop_database_query(db_name: str):
    """Build the query that drops a database."""
    return f'DROP DATABASE {db_name}'


def get_create_constraint_query(label: str, property_name: str, constraint_name: str = ''):
    """
    Build query to create a uniqueness constraint
    :param label: node label
    :param property_name: node property for the constraint
    :param constraint_name: the constraint name (optional)
    :return: cypher query
    """
    query = 'CREATE CONSTRAINT '
    if constraint_name:
        query += constraint_name
    query += f' IF NOT EXISTS ON (n:{label}) ASSERT n.{property_name} IS UNIQUE'
    return query


def get_drop_constraint_query(constraint_name: str):
    """Build the query that drops a named constraint."""
    return f'DROP CONSTRAINT {constraint_name}'


def get_create_index_query(label: str, property_name: str, index_name=''):
    """
    Build the query to create an index on a node property.
    """
    query = 'CREATE INDEX '
    if index_name:
        query += index_name
    query += f' IF NOT EXISTS FOR (n:{label}) ON (n.{property_name})'
    return query


def get_drop_index_query(index_name: str):
    """Build the query that drops a named index."""
    return f'DROP INDEX {index_name}'


def get_create_fulltext_index_query():
    """
    To run the query, need three params: $indexName as str, $labels as array
    and $properties as array
    :return: cypher query with parameters
    """
    return 'CALL db.index.fulltext.createNodeIndex($indexName, $labels, $properties)'


def get_create_update_nodes_query(
        node_label: str,
        id_name: str,
        update_properties: list,
        additional_labels=None,
        datasource=None,
        original_entity_types=None,
        namespace_label: str = ''
):
    """
    Build query to create or update nodes. Make sure for each row, the keys
    match with properties.
    :param node_label: the primary node label with id_name constraint or index
    :param id_name: the indexed property (should be `id` property)
    :param update_properties: node property names to be updated
    :param additional_labels: other node labels if exists
    :param datasource: e.g. KEGG, NCBI Gene
    :param original_entity_types: e.g. [Gene, Protein, Chemical, Disease]
    :param namespace_label: some datasource, e.g GO, can have a different
        label for each namespace
    :return: cypher query with parameter $rows
    """
    # Avoid mutable default arguments; behavior is unchanged.
    if additional_labels is None:
        additional_labels = []
    if original_entity_types is None:
        original_entity_types = []
    query_rows = list()
    query_rows.append("UNWIND $rows as row")
    query_rows.append("MERGE (n:%s {%s: row.%s})" % (node_label, id_name, id_name))
    if additional_labels or update_properties:
        prop_sets = []
        if additional_labels:
            prop_sets.append('n:' + ':'.join(additional_labels))
        if update_properties:
            # The id property is already matched by the MERGE; skip it.
            prop_sets += [f"n.{prop}=row.{prop}" for prop in update_properties if prop != id_name]
        # NOTE(review): datasource/original_entity_types are only applied when
        # additional_labels or update_properties is non-empty (prop_sets is
        # scoped to this branch) — confirm this is intentional.
        if datasource:
            prop_sets.append(f"n.data_source='{datasource}'")
        if original_entity_types:
            if not isinstance(original_entity_types, list):
                raise ValueError('Invalid argument for original_entity_type')
            original_types = '|'.join(original_entity_types)
            prop_sets.append(f"n.original_entity_types=split('{original_types}', '|')")
        if len(prop_sets) > 0:
            query_rows.append('SET ' + ','.join(prop_sets))
    if namespace_label:
        # BUG FIX: the original f-string terminated early after the namespace
        # placeholder ("...'{namespace_label}" THEN ...), a syntax error.
        query_rows.append(
            f"FOREACH (item IN CASE WHEN row.namespace = '{namespace_label}' "
            f"THEN [1] ELSE [] END | SET n:{namespace_label.title().replace('_', '')})"
        )
    query_rows.append('RETURN COUNT(*)')
    return '\n'.join(query_rows)


def get_delete_nodes_query(node_label: str, id_name: str):
    """
    Build the query to delete nodes by matching the given property (id_name).
    :param node_label: the label of the node to be deleted
    :param id_name: the property name matched against the $ids parameter
    :return: cypher query with parameter $ids where $ids is an array of ID's
        for deletion
    """
    # BUG FIX: the original compared the node itself ("where n in $ids"),
    # ignoring id_name entirely; match on the id property instead.
    return f'MATCH (n:{node_label}) where n.{id_name} in $ids with n DETACH DELETE n'


def get_create_relationships_query(
        node1_label: str, node1_id: str, node1_col: str,
        node2_label: str, node2_id: str, node2_col: str,
        relationship: str, rel_properties=None,
        foreach=False, foreach_property=''
):
    """
    Build the query to create relationships.
    :param node1_label: starting node label
    :param node1_id: starting node id property name (e.g. id, biocyc_id)
    :param node1_col: dataframe column name for the starting node id
    :param node2_label: ending node label
    :param node2_id: ending node id property name (e.g. id, biocyc_id)
    :param node2_col: dataframe column name for the ending node id
    :param relationship: the relationship type
    :param rel_properties: relationship properties
    :param foreach: if True, only create the relationship for rows whose
        foreach_property equals the relationship type
    :param foreach_property: row column compared against the relationship type
    :return: cypher query with parameter $rows
    """
    if rel_properties is None:
        rel_properties = []
    rows = list()
    rows.append("UNWIND $rows AS row")
    rows.append("MATCH (a:%s {%s: row.%s}), (b:%s {%s: row.%s})" % (
        node1_label, node1_id, node1_col, node2_label, node2_id, node2_col))
    if foreach:
        # Conditional MERGE: the FOREACH body only runs when the row matches.
        rows.append("FOREACH (item IN CASE WHEN row.%s = '%s' THEN [1] ELSE [] END | MERGE (a)-[r:%s]->(b))" % (
            foreach_property, relationship, relationship))
    else:
        rows.append(f"MERGE (a)-[r:{relationship}]->(b)")
        prop_sets = []
        if rel_properties:
            for prop in rel_properties:
                prop_sets.append(f"r.{prop}=row.{prop}")
        if prop_sets:
            set_phrase = ', '.join(prop_sets)
            rows.append(f"SET {set_phrase}")
    rows.append('RETURN COUNT(*)')
    return '\n'.join(rows)


def get_create_synonym_relationships_query(
        node_label: str, node_id: str, node_id_col: str, synonym_col: str,
        rel_properties=None, return_node_count: bool = False
):
    """
    Build the query to create a Synonym node, then create a HAS_SYNONYM
    relationship with another existing node using dataframe data.
    Dataframe need to transfer to dictionary using the following code:
    dict = {'rows': dataframe.to_dict('Records')}
    :param node_label: the node label
    :param node_id: the node id name, e.g. 'id', 'biocyc_id'
    :param node_id_col: the node id column name in the dataframe, e.g.
        'start_id', 'string_id'
    :param synonym_col: the dataframe column name for synonym
    :param rel_properties: relationship properties for HAS_SYNONYM
    :param return_node_count: If True, return COUNT(r).
    :return: cypher query with parameter $rows
    """
    if rel_properties is None:
        rel_properties = []
    query_rows = list()
    query_rows.append("UNWIND $rows AS row")
    query_rows.append("MERGE (a:Synonym {name: row.%s}) SET a.lowercase_name=toLower(row.%s)" % (synonym_col, synonym_col))
    query_rows.append("WITH row, a MATCH (b:%s {%s: row.%s})" % (node_label, node_id, node_id_col))
    query_rows.append("MERGE (b)-[r:HAS_SYNONYM]->(a)")
    prop_sets = []
    for prop in rel_properties:
        prop_sets.append(f"r.{prop}=row.{prop}")
    if prop_sets:
        set_phrase = ', '.join(prop_sets)
        query_rows.append(f"SET {set_phrase}")
    if return_node_count:
        query_rows.append('RETURN COUNT(r)')
    return '\n'.join(query_rows)
def get_create_database_query(db_name: str): return f'CREATE or REPLACE database {db_name}' def get_drop_database_query(db_name: str): return f'DROP DATABASE {db_name}' def get_create_constraint_query(label: str, property_name: str, constraint_name: str = ''): """ Build query to create a constraint :param label: node label :param property_name: node property for the constraint :param constraint_name: the constrain name :return: cypher query """ query = 'CREATE CONSTRAINT ' if constraint_name: query += constraint_name query += f' IF NOT EXISTS ON (n:{label}) ASSERT n.{property_name} IS UNIQUE' return query def get_drop_constraint_query(constraint_name: str): return f'DROP CONSTRAINT {constraint_name}' def get_create_index_query(label: str, property_name: str, index_name=''): """ get create index or composity index query. if properties contains """ query = 'CREATE INDEX ' if index_name: query += index_name query += f' IF NOT EXISTS FOR (n:{label}) ON (n.{property_name})' return query def get_drop_index_query(index_name:str): return f'DROP INDEX {index_name}' def get_create_fulltext_index_query(): """ To run the query, need three params: $indexName as str, $labels as array and $properties as array :return: """ return 'CALL db.index.fulltext.createNodeIndex($indexName, $labels, $properties)' def get_create_update_nodes_query( node_label:str, id_name: str, update_properties: list, additional_labels=[], datasource=None, original_entity_types=[], namespace_label: str = '' ): """ Build query to create or update nodes. Make sure for each row, the keys match with properties. :param node_label: the primary node label with id_name constraint or index :param id_name: the indexed property (should be `id` property) :param update_properties: node property names to be updated :param additional_labels: other node labels if exists :param datasource: e.g. KEGG, NCBI Gene :param original_entity_types: e.g. 
[Gene, Protein, Chemical, Disease] :param namespace_label: some datasouce, e.g GO, can have a different label for each namespace """ query_rows = list() query_rows.append("UNWIND $rows as row") query_rows.append("MERGE (n:%s {%s: row.%s})" % (node_label, id_name, id_name)) if additional_labels or update_properties: prop_sets = [] if additional_labels: label_set = 'n:' + ':'.join(additional_labels) prop_sets.append(label_set) if update_properties: props = [f"n.{prop}=row.{prop}" for prop in update_properties if prop != id_name] prop_sets += props if datasource: prop_sets.append(f"n.data_source='{datasource}'") if original_entity_types: if type(original_entity_types) != list: raise ValueError('Invalid argument for original_entity_type') original_types = '|'.join(original_entity_types) prop_sets.append(f"n.original_entity_types=split('{original_types}', '|')") if len(prop_sets) > 0: query_rows.append('SET ' + ','.join(prop_sets)) if namespace_label: query_rows.append(f"FOREACH (item IN CASE WHEN row.namespace = '{namespace_label}' THEN [1] ELSE [] END | SET n:{namespace_label.title().replace('_', '')})") query_rows.append('RETURN COUNT(*)') return '\n'.join(query_rows) def get_delete_nodes_query(node_label: str, id_name: str): """ build the query to delete a node by matching the node with the given property (id_name). The query will have a parameter $id which is the matched value for the "id_name" property :param node_label: the label of the node to be deleted :param id_name: the :return: cypher query with parameter $ids where $ids is an array for ID's for deletion """ return f'MATCH (n:{node_label}) where n in $ids with n DETACH DELETE n' def get_create_relationships_query( node1_label:str, node1_id:str, node1_col:str, node2_label: str, node2_id: str, node2_col: str, relationship:str, rel_properties=[], foreach=False, foreach_property='' ): """ Build the query to create relationships. 
:param node1_label: starting node label :param node1_id: starting node id property name (e.g. id, biocyc_id) :param node1_col: dataframe column name for the starting node id :param node2_label: ending node label :param node2_id: ending node id property name (e.g. id, biocyc_id) :param node2_col: dataframe column name for the ending node id :param relationship: the relationship type :param rel_properties: relationship properties """ rows = list() rows.append("UNWIND $rows AS row") rows.append("MATCH (a:%s {%s: row.%s}), (b:%s {%s: row.%s})" % ( node1_label, node1_id, node1_col, node2_label, node2_id, node2_col)) if foreach: rows.append("FOREACH (item IN CASE WHEN row.%s = '%s' THEN [1] ELSE [] END | MERGE (a)-[r:%s]->(b))" % ( foreach_property, relationship, relationship)) else: rows.append(f"MERGE (a)-[r:{relationship}]->(b)") prop_sets = [] if rel_properties: for prop in rel_properties: prop_sets.append(f"r.{prop}=row.{prop}") if prop_sets: set_phrase = ', '.join(prop_sets) rows.append(f"SET {set_phrase}") rows.append('RETURN COUNT(*)') return '\n'.join(rows) def get_create_synonym_relationships_query( node_label:str, node_id:str, node_id_col:str, synonym_col: str, rel_properties=[], return_node_count: bool=False ): """ Build the query to create node, then create relationship with another existing node using dataframe data. Dataframe need to transfer to dictionary using the following code: dict = {'rows': dataframe.to_dict('Records')} :param node_label: the node label :param node_id: the node id name, e.g. 'id', 'biocyc_id' :param node_id_col: the node id column name in the dataframe, e.g. 'start_id', 'string_id' :param synonym_col: the dataframe column name for synonym :param rel_properties: relationship properties for HAS_SYNONYM :return_node_count: If True, return COUNT(r). 
:return: cypher query with parameter $dict """ query_rows = list() query_rows.append("UNWIND $rows AS row") query_rows.append("MERGE (a:Synonym {name: row.%s}) SET a.lowercase_name=toLower(row.%s)" % (synonym_col, synonym_col)) query_rows.append("WITH row, a MATCH (b:%s {%s: row.%s})" % (node_label, node_id, node_id_col)) query_rows.append("MERGE (b)-[r:HAS_SYNONYM]->(a)") prop_sets = [] for prop in rel_properties: prop_sets.append(f"r.{prop}=row.{prop}") if prop_sets: set_phrase = ', '.join(prop_sets) query_rows.append(f"SET {set_phrase}") if return_node_count: query_rows.append('RETURN COUNT(r)') return '\n'.join(query_rows)
""" MTGJSON Set Builder """ import json import logging import pathlib import re import unicodedata import uuid from typing import Any, Dict, List, Optional, Set, Tuple from . import consts from .classes import ( MtgjsonCardObject, MtgjsonForeignDataObject, MtgjsonGameFormatsObject, MtgjsonLeadershipSkillsObject, MtgjsonLegalitiesObject, MtgjsonMetaObject, MtgjsonRulingObject, MtgjsonSetObject, ) from .consts import ( BASIC_LAND_NAMES, CARD_MARKET_BUFFER, FOREIGN_SETS, LANGUAGE_MAP, RESOURCE_PATH, SUPER_TYPES, ) from .providers import ( CardMarketProvider, GathererProvider, GitHubBoostersProvider, MTGBanProvider, ScryfallProvider, WhatsInStandardProvider, WizardsProvider, ) from .utils import get_str_or_none, parallel_call, url_keygen LOGGER = logging.getLogger(__name__) def parse_foreign( sf_prints_url: str, card_name: str, card_number: str, set_name: str ) -> List[MtgjsonForeignDataObject]: """ Get the foreign printings information for a specific card :param card_number: Card's number :param sf_prints_url: URL to get prints from :param card_name: Card name to parse (needed for double faced) :param set_name: Set name :return: Foreign entries object """ card_foreign_entries: List[MtgjsonForeignDataObject] = [] # Add information to get all languages sf_prints_url = sf_prints_url.replace("&unique=prints", "+lang%3Aany&unique=prints") prints_api_json: Dict[str, Any] = ScryfallProvider().download(sf_prints_url) if prints_api_json["object"] == "error": LOGGER.error(f"No data found for {sf_prints_url}: {prints_api_json}") return [] for foreign_card in prints_api_json["data"]: if ( set_name != foreign_card["set"] or card_number != foreign_card["collector_number"] or foreign_card["lang"] == "en" ): continue card_foreign_entry = MtgjsonForeignDataObject() try: card_foreign_entry.language = LANGUAGE_MAP[foreign_card["lang"]] except IndexError: LOGGER.warning(f"Unable to get language {foreign_card}") if foreign_card["multiverse_ids"]: card_foreign_entry.multiverse_id = 
foreign_card["multiverse_ids"][0] if "card_faces" in foreign_card: if card_name.lower() == foreign_card["name"].split("/")[0].strip().lower(): face = 0 else: face = 1 LOGGER.debug(f"Split card found: Using face {face} for {card_name}") card_foreign_entry.name = " // ".join( [ face_data.get("printed_name", face_data.get("name", "")) for face_data in foreign_card["card_faces"] ] ) foreign_card = foreign_card["card_faces"][face] card_foreign_entry.face_name = foreign_card.get("printed_name") if not card_foreign_entry.face_name: LOGGER.warning(f"Unable to resolve name for {foreign_card}") card_foreign_entry.face_name = foreign_card.get("name") if not card_foreign_entry.name: card_foreign_entry.name = foreign_card.get("printed_name") card_foreign_entry.text = foreign_card.get("printed_text") card_foreign_entry.flavor_text = foreign_card.get("flavor_text") card_foreign_entry.type = foreign_card.get("printed_type_line") if card_foreign_entry.name: card_foreign_entries.append(card_foreign_entry) return card_foreign_entries def parse_card_types(card_type: str) -> Tuple[List[str], List[str], List[str]]: """ Given a card type string, split it up into its raw components: super, sub, and type :param card_type: Card type string to parse :return: Tuple (super, type, sub) of the card's attributes """ sub_types: List[str] = [] super_types: List[str] = [] types: List[str] = [] supertypes_and_types: str if "—" not in card_type: supertypes_and_types = card_type else: split_type: List[str] = card_type.split("—") supertypes_and_types = split_type[0] subtypes: str = split_type[1] # Planes are an entire sub-type, whereas normal cards # are split by spaces if card_type.startswith("Plane"): sub_types = [subtypes.strip()] else: sub_types = [x.strip() for x in subtypes.split() if x] for value in supertypes_and_types.split(): if value in SUPER_TYPES: super_types.append(value) elif value: types.append(value) return super_types, types, sub_types def get_card_colors(mana_cost: str) -> List[str]: 
""" For some cards, we may have to manually determine the card's color. :param mana_cost: Mana cost string :return: Colors based on mana cost """ color_options: List[str] = ["W", "U", "B", "R", "G"] ret_val = [] for color in color_options: if color in mana_cost: ret_val.append(color) return ret_val def get_scryfall_set_data(set_code: str) -> Optional[Dict[str, Any]]: """ Get a Scryfall set header for a specific set :param set_code: Set to grab header for :return: Set header, if it exists """ set_data: Dict[str, Any] = ScryfallProvider().download( ScryfallProvider().ALL_SETS_URL + set_code ) if set_data["object"] == "error": LOGGER.error(f"Failed to download {set_code}") return None return set_data def is_number(string: str) -> bool: """See if a given string is a number (int or float)""" try: float(string) return True except ValueError: pass try: unicodedata.numeric(string) return True except (TypeError, ValueError): pass return False def get_card_cmc(mana_cost: str) -> float: """ For some cards, we may have to manually update the converted mana cost. We do this by reading the inner components of each pair of {} and deciphering what the contents mean. If number, we're good. Otherwise +1. 
:param mana_cost: Mana cost string :return: One sided cmc """ total: float = 0 symbol: List[str] = re.findall(r"{([^{]*)}", mana_cost.strip()) for element in symbol: # Address 2/W, G/W, etc as "higher" cost always first if "/" in element: element = element.split("/")[0] if is_number(element): total += float(element) elif element in ["X", "Y", "Z"]: # Placeholder mana continue elif element[0] == "H": # Half mana total += 0.5 else: total += 1 return total def parse_printings(sf_prints_url: Optional[str]) -> List[str]: """ Given a Scryfall printings URL, extract all sets a card was printed in :param sf_prints_url: URL to extract data from :return: List of all sets a specific card was printed in """ card_sets: Set[str] = set() while sf_prints_url: prints_api_json: Dict[str, Any] = ScryfallProvider().download(sf_prints_url) if prints_api_json["object"] == "error": LOGGER.error(f"Bad download: {sf_prints_url}") break for card in prints_api_json["data"]: card_sets.add(card.get("set").upper()) if not prints_api_json.get("has_more"): break sf_prints_url = prints_api_json.get("next_page") return sorted(list(card_sets)) def parse_legalities(sf_card_legalities: Dict[str, str]) -> MtgjsonLegalitiesObject: """ Given a Scryfall legalities dictionary, convert it to MTGJSON format :param sf_card_legalities: Scryfall legalities :return: MTGJSON legalities """ card_legalities = MtgjsonLegalitiesObject() for key, value in sf_card_legalities.items(): if value != "not_legal": setattr(card_legalities, key.lower(), value.capitalize()) return card_legalities def parse_rulings(rulings_url: str) -> List[MtgjsonRulingObject]: """ Get the JSON data from Scryfall and convert it to MTGJSON format for rulings :param rulings_url: URL to get Scryfall JSON data from :return: MTGJSON rulings list """ rules_api_json: Dict[str, Any] = ScryfallProvider().download(rulings_url) if rules_api_json["object"] == "error": LOGGER.error(f"Error downloading URL {rulings_url}: {rules_api_json}") return [] 
mtgjson_rules: List[MtgjsonRulingObject] = [] for sf_rule in rules_api_json["data"]: mtgjson_rule = MtgjsonRulingObject(sf_rule["published_at"], sf_rule["comment"]) mtgjson_rules.append(mtgjson_rule) return sorted(mtgjson_rules, key=lambda ruling: ruling.date) def relocate_miscellaneous_tokens(mtgjson_set: MtgjsonSetObject) -> None: """ Sometimes tokens find their way into the main set. This will remove them from the cards array and sets an internal market to be dealt with later down the line :param mtgjson_set: MTGJSON Set object """ LOGGER.info(f"Relocate tokens for {mtgjson_set.code}") token_types = {"token", "double_faced_token", "emblem", "art_series"} # Identify unique tokens from cards tokens_found = { card.identifiers.scryfall_id for card in mtgjson_set.cards if card.layout in token_types and card.identifiers.scryfall_id } # Remove tokens from cards mtgjson_set.cards[:] = ( card for card in mtgjson_set.cards if card.layout not in token_types ) # Scryfall objects to handle later mtgjson_set.extra_tokens = [ ScryfallProvider().download(ScryfallProvider().CARDS_URL + scryfall_id) for scryfall_id in tokens_found ] LOGGER.info(f"Finished relocating tokens for {mtgjson_set.code}") def mark_duel_decks(set_code: str, mtgjson_cards: List[MtgjsonCardObject]) -> None: """ For Duel Decks, we need to determine which "deck" the card can be found in. This is a convoluted, but correct, approach at solving that problem. 
:param set_code: Set to work on :param mtgjson_cards: Card Objects """ LOGGER.info(f"Marking duel deck status for {set_code}") if set_code.startswith("DD") or set_code in {"GS1"}: land_pile_marked = False side_letter_as_number = ord("a") for card in sorted(mtgjson_cards): if card.name in BASIC_LAND_NAMES: land_pile_marked = True elif any(_type in card.type for _type in ("Token", "Emblem")): continue elif land_pile_marked: side_letter_as_number += 1 land_pile_marked = False card.duel_deck = chr(side_letter_as_number) LOGGER.info(f"Finished marking duel deck status for {set_code}") def build_mtgjson_set(set_code: str) -> Optional[MtgjsonSetObject]: """ Construct a MTGJSON Magic Set :param set_code: Set to construct :return: Set object """ # Output Object mtgjson_set = MtgjsonSetObject() # Ensure we have a header for this set set_data = get_scryfall_set_data(set_code) if not set_data: return None # Explicit Variables mtgjson_set.name = set_data["name"].strip() mtgjson_set.code = set_data["code"].upper() mtgjson_set.type = set_data["set_type"] mtgjson_set.keyrune_code = pathlib.Path(set_data["icon_svg_uri"]).stem.upper() mtgjson_set.release_date = set_data["released_at"] mtgjson_set.mtgo_code = set_data.get("mtgo_code", "").upper() mtgjson_set.parent_code = set_data.get("parent_set_code", "").upper() mtgjson_set.block = set_data.get("block", "") mtgjson_set.is_online_only = set_data.get("digital", "") mtgjson_set.is_foil_only = set_data.get("foil_only", "") mtgjson_set.is_non_foil_only = set_data.get("nonfoil_only", "") mtgjson_set.search_uri = set_data["search_uri"] mtgjson_set.mcm_name = CardMarketProvider().get_set_name(mtgjson_set.name) mtgjson_set.mcm_id = CardMarketProvider().get_set_id(mtgjson_set.name) mtgjson_set.translations = WizardsProvider().get_translation_for_set( mtgjson_set.code ) base_total_sizes = get_base_and_total_set_sizes(set_code) mtgjson_set.base_set_size = base_total_sizes[0] mtgjson_set.total_set_size = base_total_sizes[1] # Building cards is 
a process mtgjson_set.cards = build_base_mtgjson_cards(set_code) add_is_starter_option(set_code, mtgjson_set.search_uri, mtgjson_set.cards) relocate_miscellaneous_tokens(mtgjson_set) add_variations_and_alternative_fields(mtgjson_set) add_mcm_details(mtgjson_set) add_card_kingdom_details(mtgjson_set) # Build tokens, a little less of a process mtgjson_set.tokens = build_base_mtgjson_tokens( f"T{set_code}", mtgjson_set.extra_tokens or [] ) mtgjson_set.tcgplayer_group_id = set_data.get("tcgplayer_id") mtgjson_set.booster = GitHubBoostersProvider().get_set_booster_data(set_code) mark_duel_decks(set_code, mtgjson_set.cards) # Implicit Variables mtgjson_set.is_foreign_only = mtgjson_set.code in FOREIGN_SETS mtgjson_set.is_partial_preview = MtgjsonMetaObject().date < mtgjson_set.release_date return mtgjson_set def build_base_mtgjson_tokens( set_code: str, added_tokens: List[Dict[str, Any]] ) -> List[MtgjsonCardObject]: """ Construct all tokens in MTGJSON format from a single set :param set_code: Set to build :param added_tokens: Additional tokens to build :return: Completed card objects """ return build_base_mtgjson_cards(set_code, added_tokens, True) def build_base_mtgjson_cards( set_code: str, additional_cards: List[Dict[str, Any]] = None, is_token: bool = False ) -> List[MtgjsonCardObject]: """ Construct all cards in MTGJSON format from a single set :param set_code: Set to build :param additional_cards: Additional objs to build (not relevant for normal builds) :param is_token: Are tokens being copmiled? 
:return: Completed card objects """ LOGGER.info(f"Building cards for {set_code}") cards = ScryfallProvider().download_cards(set_code) cards.extend(additional_cards or []) mtgjson_cards = parallel_call( build_mtgjson_card, cards, fold_list=True, repeatable_args=(0, is_token) ) LOGGER.info(f"Finished building cards for {set_code}") return list(mtgjson_cards) def add_is_starter_option( set_code: str, search_url: str, mtgjson_cards: List[MtgjsonCardObject] ) -> None: """ There are cards that may not exist in standard boosters. As such, we mark those as starter cards. :param set_code: Set to handle :param search_url: URL to search for cards in :param mtgjson_cards: Card Objects to modify """ LOGGER.info(f"Add starter data to {set_code}") starter_card_url = search_url.replace("&unique=", "++not:booster&unique=") starter_cards = ScryfallProvider().download(starter_card_url) if starter_cards["object"] == "error": LOGGER.debug(f"All cards in {set_code} are available in boosters") LOGGER.info(f"Finished adding starter data to {set_code}") return for scryfall_object in starter_cards["data"]: mtgjson_cards_with_same_id = [ item for item in mtgjson_cards if item.identifiers.scryfall_id == scryfall_object["id"] ] for card in mtgjson_cards_with_same_id: card.is_starter = True LOGGER.info(f"Finished adding starter data to {set_code}") def add_leadership_skills(mtgjson_card: MtgjsonCardObject) -> None: """ Determine if a card is able to be your commander, and if so which format(s). 
:param mtgjson_card: Card object """ is_commander_legal = ( "Legendary" in mtgjson_card.type and "Creature" in mtgjson_card.type # Exclude Flip cards and mtgjson_card.type not in {"flip"} # Exclude Melded cards and backside of Transform cards and (mtgjson_card.side == "a" if mtgjson_card.side else True) ) or ("can be your commander" in mtgjson_card.text) is_oathbreaker_legal = "Planeswalker" in mtgjson_card.type is_brawl_legal = mtgjson_card.set_code.upper() in WhatsInStandardProvider().set_codes and ( is_oathbreaker_legal or is_commander_legal ) if is_commander_legal or is_oathbreaker_legal or is_brawl_legal: mtgjson_card.leadership_skills = MtgjsonLeadershipSkillsObject( is_brawl_legal, is_commander_legal, is_oathbreaker_legal ) def add_uuid(mtgjson_card: MtgjsonCardObject) -> None: """ Construct a UUIDv5 for each MTGJSON card object This will also add UUIDv4 for legacy support :param mtgjson_card: Card object """ if {"Token", "Card"}.intersection(mtgjson_card.types): # Tokens have a special generation method id_source_v5 = ( mtgjson_card.name + (mtgjson_card.face_name or "") + "".join((mtgjson_card.colors or "")) + (mtgjson_card.power or "") + (mtgjson_card.toughness or "") + (mtgjson_card.side or "") + mtgjson_card.set_code[1:].lower() + (mtgjson_card.identifiers.scryfall_id or "") + (mtgjson_card.identifiers.scryfall_illustration_id or "") ) id_source_v4 = ( (mtgjson_card.face_name if mtgjson_card.face_name else mtgjson_card.name) + "".join((mtgjson_card.colors or "")) + (mtgjson_card.power or "") + (mtgjson_card.toughness or "") + (mtgjson_card.side or "") + mtgjson_card.set_code[1:].upper() + (mtgjson_card.identifiers.scryfall_id or "") ) else: # Normal cards only need a few pieces of data id_source_v5 = ( ScryfallProvider().get_class_id() + (mtgjson_card.identifiers.scryfall_id or "") + (mtgjson_card.identifiers.scryfall_illustration_id or "") + mtgjson_card.set_code.lower() + mtgjson_card.name + (mtgjson_card.face_name or "") ) id_source_v4 = ( "sf" + 
(mtgjson_card.identifiers.scryfall_id or "") + (mtgjson_card.face_name if mtgjson_card.face_name else mtgjson_card.name) ) mtgjson_card.uuid = str(uuid.uuid5(uuid.NAMESPACE_DNS, id_source_v5)) mtgjson_card.identifiers.mtgjson_v4_id = str( uuid.uuid5(uuid.NAMESPACE_DNS, id_source_v4) ) def build_mtgjson_card( scryfall_object: Dict[str, Any], face_id: int = 0, is_token: bool = False ) -> List[MtgjsonCardObject]: """ Construct a MTGJSON Card object from 3rd party entities :param scryfall_object: Scryfall Card Object :param face_id: What face to build for (set internally) :param is_token: Is this a token object? (some diff fields) :return: List of card objects that were constructed """ LOGGER.info(f"Building {scryfall_object["set"].upper()}: {scryfall_object["name"]}") # Return List mtgjson_cards = [] # Object Container mtgjson_card = MtgjsonCardObject(is_token) mtgjson_card.name = scryfall_object["name"] mtgjson_card.flavor_name = scryfall_object.get("flavor_name") mtgjson_card.set_code = scryfall_object["set"].lower() mtgjson_card.identifiers.scryfall_id = scryfall_object["id"] mtgjson_card.identifiers.scryfall_oracle_id = scryfall_object["oracle_id"] # Handle atypical cards face_data = scryfall_object if "card_faces" in scryfall_object: mtgjson_card.set_names(scryfall_object["name"].split("//")) # Override face_data from above face_data = scryfall_object["card_faces"][face_id] if "//" in scryfall_object.get("mana_cost", ""): mtgjson_card.colors = get_card_colors( scryfall_object["mana_cost"].split("//")[face_id] ) mtgjson_card.face_converted_mana_cost = get_card_cmc( scryfall_object["mana_cost"].split("//")[face_id] ) elif scryfall_object["layout"] in { "split", "transform", "aftermath", "adventure", }: mtgjson_card.face_converted_mana_cost = get_card_cmc( face_data.get("mana_cost", "0") ) mtgjson_card.set_watermark(scryfall_object["card_faces"][0].get("watermark")) if scryfall_object["card_faces"][-1]["oracle_text"].startswith("Aftermath"): mtgjson_card.layout = 
"aftermath" mtgjson_card.artist = scryfall_object["card_faces"][face_id].get("artist", "") if face_id == 0: for i in range(1, len(scryfall_object["card_faces"])): mtgjson_cards.extend(build_mtgjson_card(scryfall_object, i, is_token)) # Start of single card builder if face_data.get("mana_cost"): mtgjson_card.mana_cost = face_data["mana_cost"] mtgjson_card.identifiers.scryfall_illustration_id = scryfall_object.get( "illustration_id", face_data.get("illustration_id") ) if not mtgjson_card.colors: mtgjson_card.colors = ( face_data["colors"] if "colors" in face_data.keys() else scryfall_object["colors"] ) # Explicit Variables -- Based on the entire card object mtgjson_card.border_color = scryfall_object.get("border_color", "") mtgjson_card.color_identity = scryfall_object.get("color_identity", "") mtgjson_card.converted_mana_cost = scryfall_object.get("cmc", "") mtgjson_card.edhrec_rank = scryfall_object.get("edhrec_rank") mtgjson_card.frame_effects = scryfall_object.get("frame_effects", "") mtgjson_card.frame_version = scryfall_object.get("frame", "") mtgjson_card.hand = scryfall_object.get("hand_modifier") mtgjson_card.has_foil = scryfall_object.get("foil") mtgjson_card.has_non_foil = scryfall_object.get("nonfoil") mtgjson_card.has_content_warning = scryfall_object.get("content_warning") mtgjson_card.is_full_art = scryfall_object.get("full_art") mtgjson_card.is_online_only = scryfall_object.get("digital") mtgjson_card.is_oversized = scryfall_object.get("oversized") mtgjson_card.is_promo = scryfall_object.get("promo") mtgjson_card.is_reprint = scryfall_object.get("reprint") mtgjson_card.is_reserved = scryfall_object.get("reserved") mtgjson_card.is_story_spotlight = scryfall_object.get("story_spotlight") mtgjson_card.is_textless = scryfall_object.get("textless") mtgjson_card.life = scryfall_object.get("life_modifier") mtgjson_card.identifiers.mtg_arena_id = get_str_or_none( scryfall_object.get("arena_id") ) mtgjson_card.identifiers.mtgo_id = 
get_str_or_none(scryfall_object.get("mtgo_id")) mtgjson_card.identifiers.mtgo_foil_id = get_str_or_none( scryfall_object.get("mtgo_foil_id") ) mtgjson_card.number = scryfall_object.get("collector_number", "0") # Handle Promo Types for MTGJSON mtgjson_card.promo_types = scryfall_object.get("promo_types", []) if mtgjson_card.number.endswith("p"): mtgjson_card.promo_types.append("planeswalkerstamped") # Remove terms that are covered elsewhere mtgjson_card.promo_types = [ card_type for card_type in mtgjson_card.promo_types if card_type not in {"starterdeck", "planeswalkerdeck"} ] mtgjson_card.rarity = scryfall_object.get("rarity", "") if not mtgjson_card.artist: mtgjson_card.artist = scryfall_object.get("artist", "") if not mtgjson_card.layout: mtgjson_card.layout = scryfall_object.get("layout", "") if not mtgjson_card.watermark: mtgjson_card.set_watermark(face_data.get("watermark")) # Indicate if this component exists on the platform mtgjson_card.availability = MtgjsonGameFormatsObject() mtgjson_card.availability.arena = "arena" in scryfall_object.get("games", []) or ( mtgjson_card.identifiers.mtg_arena_id is not None ) mtgjson_card.availability.mtgo = "mtgo" in scryfall_object.get("games", []) or ( mtgjson_card.identifiers.mtgo_id is not None ) mtgjson_card.availability.paper = not mtgjson_card.is_online_only mtgjson_card.availability.shandalar = "astral" in scryfall_object.get("games", []) mtgjson_card.availability.dreamcast = "sega" in scryfall_object.get("games", []) # Explicit Variables -- Based on the face of the card mtgjson_card.loyalty = face_data.get("loyalty") ascii_name = ( unicodedata.normalize("NFD", mtgjson_card.name) .encode("ascii", "ignore") .decode() ) if mtgjson_card.name != ascii_name: LOGGER.debug(f"Adding ascii name for {mtgjson_card.name} -> {ascii_name}") mtgjson_card.ascii_name = ascii_name mtgjson_card.power = face_data.get("power", "") mtgjson_card.text = face_data.get("oracle_text", "") mtgjson_card.toughness = face_data.get("toughness", 
"") mtgjson_card.type = face_data.get("type_line", "Card") # Explicit -- Depending on if card face has it or not mtgjson_card.flavor_text = ( face_data.get("flavor_text") if face_data.get("flavor_text") else scryfall_object.get("flavor_text") ) if "color_indicator" in face_data.keys(): mtgjson_card.color_indicator = face_data["color_indicator"] elif "color_indicator" in scryfall_object.keys(): mtgjson_card.color_indicator = scryfall_object["color_indicator"] if scryfall_object["multiverse_ids"]: if len(scryfall_object["multiverse_ids"]) > face_id: mtgjson_card.identifiers.multiverse_id = get_str_or_none( scryfall_object["multiverse_ids"][face_id] ) else: mtgjson_card.identifiers.multiverse_id = get_str_or_none( scryfall_object["multiverse_ids"][0] ) # Add "side" for split cards (cards with exactly 2 sides) # Also set face name if mtgjson_card.get_names(): mtgjson_card.face_name = str(face_data["name"]) if mtgjson_card.layout not in ["meld"]: # Fix #632 as there are very limited distinguishing attributes if mtgjson_card.set_code == "tust": mtgjson_card.side = "a" if mtgjson_card.type != "Token" else "b" else: # chr(97) = 'a', chr(98) = 'b', ... 
mtgjson_card.side = chr( mtgjson_card.get_names().index(mtgjson_card.face_name) + 97 ) # Implicit Variables mtgjson_card.is_timeshifted = ( scryfall_object.get("frame") == "future" or mtgjson_card.set_code == "tsb" ) mtgjson_card.printings = parse_printings( scryfall_object["prints_search_uri"].replace("%22", "") ) mtgjson_card.legalities = parse_legalities(scryfall_object["legalities"]) mtgjson_card.rulings = parse_rulings(scryfall_object["rulings_uri"]) card_types = parse_card_types(mtgjson_card.type) mtgjson_card.supertypes = card_types[0] mtgjson_card.types = card_types[1] mtgjson_card.subtypes = card_types[2] if "Planeswalker" in mtgjson_card.types: mtgjson_card.text = re.sub(r"([+−-]?[0-9X]+):", r"[\1]:", mtgjson_card.text) # Keywords have to be split up on our end for individual card faces mtgjson_card.keywords = [ keyword for keyword in sorted(scryfall_object.get("keywords", [])) if keyword.lower() in mtgjson_card.text.lower() ] # Handle Meld components, as well as tokens if "all_parts" in scryfall_object.keys(): meld_object = [] mtgjson_card.set_names(None) for a_part in scryfall_object["all_parts"]: if a_part["component"] != "token": if "//" in a_part.get("name"): mtgjson_card.set_names(a_part.get("name").split("//")) break # This is a meld only-fix, so we ignore tokens/combo pieces if "meld" in a_part["component"]: meld_object.append(a_part["component"]) mtgjson_card.append_names(a_part.get("name")) # If the only entry is the original card, empty the names array if ( mtgjson_card.get_names() and len(mtgjson_card.get_names()) == 1 and mtgjson_card.name in mtgjson_card.get_names() ): mtgjson_card.set_names(None) # Meld cards should be CardA, Meld, CardB. 
if ( len(meld_object) == 3 and meld_object[1] != "meld_result" and mtgjson_card.get_names() ): mtgjson_card.set_names( [ mtgjson_card.get_names()[0], mtgjson_card.get_names()[2], mtgjson_card.get_names()[1], ] ) # Meld Object if mtgjson_card.get_names() and len(mtgjson_card.get_names()) == 3: # Front Sides will become Front1//Back, Front2//Back # Back Side will just be Back if mtgjson_card.name != mtgjson_card.get_names()[1]: mtgjson_card.side = "a" mtgjson_card.face_name = mtgjson_card.name mtgjson_card.name = ( f"{mtgjson_card.name} // {mtgjson_card.get_names()[1]}" ) else: mtgjson_card.face_name = mtgjson_card.name mtgjson_card.side = "b" mtgjson_card.foreign_data = parse_foreign( scryfall_object["prints_search_uri"].replace("%22", ""), mtgjson_card.face_name if mtgjson_card.face_name else mtgjson_card.name, mtgjson_card.number, mtgjson_card.set_code, ) if mtgjson_card.name in ScryfallProvider().cards_without_limits: mtgjson_card.has_alternative_deck_limit = True add_uuid(mtgjson_card) add_leadership_skills(mtgjson_card) # Add purchase URL components after UUIDs are finalized mtgjson_card.raw_purchase_urls.update(scryfall_object.get("purchase_uris", {})) if "tcgplayer_id" in scryfall_object: mtgjson_card.identifiers.tcgplayer_product_id = str( scryfall_object["tcgplayer_id"] ) mtgjson_card.purchase_urls.tcgplayer = url_keygen( mtgjson_card.identifiers.tcgplayer_product_id + mtgjson_card.uuid ) if is_token: reverse_related: List[str] = [] if "all_parts" in scryfall_object: for a_part in scryfall_object["all_parts"]: if a_part.get("name") != mtgjson_card.name: reverse_related.append(a_part.get("name")) mtgjson_card.reverse_related = reverse_related # Gatherer Calls -- SLOWWWWW if mtgjson_card.identifiers.multiverse_id: gatherer_cards = GathererProvider().get_cards( mtgjson_card.identifiers.multiverse_id, mtgjson_card.set_code ) if len(gatherer_cards) > face_id: mtgjson_card.original_type = gatherer_cards[face_id].original_types mtgjson_card.original_text = 
gatherer_cards[face_id].original_text mtgjson_cards.append(mtgjson_card) return mtgjson_cards def add_variations_and_alternative_fields(mtgjson_set: MtgjsonSetObject) -> None: """ Set the variations, other_face_ids, and is_alternative statuses for all cards within the set object :param mtgjson_set: MTGJSON Set Object to modify """ if not mtgjson_set.cards: return LOGGER.info(f"Adding variations for {mtgjson_set.code}") for this_card in mtgjson_set.cards: # Adds other face ID list if this_card.get_names(): this_card.other_face_ids = [] for other_card in mtgjson_set.cards: if other_card.face_name not in this_card.get_names(): continue if other_card.uuid == this_card.uuid: continue if this_card.layout == "meld": # Meld cards should account for the other sides if this_card.side != other_card.side: this_card.other_face_ids.append(other_card.uuid) elif other_card.number: # Most split cards should have the same number if other_card.number == this_card.number: this_card.other_face_ids.append(other_card.uuid) else: # No number? No problem, just add it! 
this_card.other_face_ids.append(other_card.uuid) # Adds variations variations = [ item.uuid for item in mtgjson_set.cards if item.name.split(" (")[0] == this_card.name.split(" (")[0] and item.face_name == this_card.face_name and item.uuid != this_card.uuid and (item.number != this_card.number if item.number else True) ] if variations: this_card.variations = variations # Add alternative tag # Ignore singleton printings in set, as well as basics if not variations or this_card.name in BASIC_LAND_NAMES: continue # Some hardcoded checking due to inconsistencies upstream if mtgjson_set.code.upper() in ["UNH", "10E"]: # Check for duplicates, mark the foils if ( len(variations) >= 1 and this_card.has_foil and not this_card.has_non_foil ): this_card.is_alternative = True elif mtgjson_set.code.upper() in ["CN2", "BBD"]: # Check for set number > set size if int(this_card.number.replace(chr(9733), "")) > mtgjson_set.base_set_size: this_card.is_alternative = True else: # Check for a star in the number if chr(9733) in this_card.number: this_card.is_alternative = True LOGGER.info(f"Finished adding variations for {mtgjson_set.code}") def add_card_kingdom_details(mtgjson_set: MtgjsonSetObject) -> None: """ Add the CardKingdom components, like IDs and purchase URLs :param mtgjson_set: MTGJSON Set """ LOGGER.info(f"Adding CK details for {mtgjson_set.code}") translation_table = MTGBanProvider().get_mtgjson_to_card_kingdom() for mtgjson_card in mtgjson_set.cards: if mtgjson_card.uuid not in translation_table: continue entry = translation_table[mtgjson_card.uuid] if "normal" in entry: mtgjson_card.identifiers.card_kingdom_id = str(entry["normal"]["id"]) mtgjson_card.purchase_urls.card_kingdom = url_keygen( entry["normal"]["url"] + mtgjson_card.uuid ) mtgjson_card.raw_purchase_urls.update( {"cardKingdom": entry["normal"]["url"] + consts.CARD_KINGDOM_REFERRAL} ) if "foil" in entry: mtgjson_card.identifiers.card_kingdom_foil_id = str(entry["foil"]["id"]) 
mtgjson_card.purchase_urls.card_kingdom_foil = url_keygen( entry["foil"]["url"] + mtgjson_card.uuid ) mtgjson_card.raw_purchase_urls.update( {"cardKingdomFoil": entry["foil"]["url"] + consts.CARD_KINGDOM_REFERRAL} ) LOGGER.info(f"Finished adding CK details for {mtgjson_set.code}") def add_mcm_details(mtgjson_set: MtgjsonSetObject) -> None: """ Add the MKM components to a set's cards and tokens :param mtgjson_set: MTGJSON Set """ LOGGER.info(f"Adding MCM details for {mtgjson_set.code}") mkm_cards = CardMarketProvider().get_mkm_cards(mtgjson_set.mcm_id) for mtgjson_card in mtgjson_set.cards: delete_key = False # There are multiple ways MKM represents cards... if mtgjson_card.name.lower() in mkm_cards.keys(): # First lets see if the card name is found card_key = mtgjson_card.name.lower() elif ( mtgjson_card.face_name and mtgjson_card.face_name.lower() in mkm_cards.keys() ): # If that failed, lets see if the face name is found card_key = mtgjson_card.face_name.lower() elif mtgjson_card.name.replace("//", "/").lower() in mkm_cards.keys(): # Finally, lets check if they used a single slash for split-type cards card_key = mtgjson_card.name.replace("//", "/").lower() else: # Multiple printings of a card in the set... 
just guess at this point
            card_key = ""
            for mkm_card in mkm_cards:
                if mkm_card.startswith(mtgjson_card.name.lower()):
                    card_key = mkm_card
                    # Consume prefix matches so the same MKM entry is not
                    # handed to a second printing of this card
                    delete_key = True
                    break

        if not card_key:
            LOGGER.debug(f"Failed to find {mtgjson_card.name} for MKM")
            continue

        mkm_obj = mkm_cards[card_key]
        if delete_key:
            del mkm_cards[card_key]

        # MTGJSON identifiers are always strings
        mtgjson_card.identifiers.mcm_id = str(mkm_obj["idProduct"])
        mtgjson_card.identifiers.mcm_meta_id = str(mkm_obj["idMetaproduct"])

        mtgjson_card.purchase_urls.cardmarket = url_keygen(
            mtgjson_card.identifiers.mcm_id
            + mtgjson_card.uuid
            + CARD_MARKET_BUFFER
            + mtgjson_card.identifiers.mcm_meta_id
        )

    LOGGER.info(f"Finished adding MCM details for {mtgjson_set.code}")


def get_base_and_total_set_sizes(set_code: str) -> Tuple[int, int]:
    """
    Get the size of a set from scryfall or corrections file
    :param set_code: Set code, upper case
    :return: Amount of cards in set (base, total)
    """
    # Load cache if not loaded
    with RESOURCE_PATH.joinpath("base_set_sizes.json").open(encoding="utf-8") as f:
        base_set_size_override = json.load(f)

    if set_code in base_set_size_override.keys():
        # Manual correction
        base_set_size = int(base_set_size_override[set_code])
    else:
        # Download on the fly
        base_set_size_download = ScryfallProvider().download(
            ScryfallProvider().CARDS_IN_BASE_SET_URL.format(set_code)
        )

        # Wasn't able to determine, so use all cards instead
        if base_set_size_download["object"] == "error":
            base_set_size_download = ScryfallProvider().download(
                ScryfallProvider().CARDS_IN_SET.format(set_code)
            )

        base_set_size = int(base_set_size_download.get("total_cards", 0))

    total_set_size = len(ScryfallProvider().download_cards(set_code))

    return base_set_size, total_set_size
""" MTGJSON Set Builder """ import json import logging import pathlib import re import unicodedata import uuid from typing import Any, Dict, List, Optional, Set, Tuple from . import consts from .classes import ( MtgjsonCardObject, MtgjsonForeignDataObject, MtgjsonGameFormatsObject, MtgjsonLeadershipSkillsObject, MtgjsonLegalitiesObject, MtgjsonMetaObject, MtgjsonRulingObject, MtgjsonSetObject, ) from .consts import ( BASIC_LAND_NAMES, CARD_MARKET_BUFFER, FOREIGN_SETS, LANGUAGE_MAP, RESOURCE_PATH, SUPER_TYPES, ) from .providers import ( CardMarketProvider, GathererProvider, GitHubBoostersProvider, MTGBanProvider, ScryfallProvider, WhatsInStandardProvider, WizardsProvider, ) from .utils import get_str_or_none, parallel_call, url_keygen LOGGER = logging.getLogger(__name__) def parse_foreign( sf_prints_url: str, card_name: str, card_number: str, set_name: str ) -> List[MtgjsonForeignDataObject]: """ Get the foreign printings information for a specific card :param card_number: Card's number :param sf_prints_url: URL to get prints from :param card_name: Card name to parse (needed for double faced) :param set_name: Set name :return: Foreign entries object """ card_foreign_entries: List[MtgjsonForeignDataObject] = [] # Add information to get all languages sf_prints_url = sf_prints_url.replace("&unique=prints", "+lang%3Aany&unique=prints") prints_api_json: Dict[str, Any] = ScryfallProvider().download(sf_prints_url) if prints_api_json["object"] == "error": LOGGER.error(f"No data found for {sf_prints_url}: {prints_api_json}") return [] for foreign_card in prints_api_json["data"]: if ( set_name != foreign_card["set"] or card_number != foreign_card["collector_number"] or foreign_card["lang"] == "en" ): continue card_foreign_entry = MtgjsonForeignDataObject() try: card_foreign_entry.language = LANGUAGE_MAP[foreign_card["lang"]] except IndexError: LOGGER.warning(f"Unable to get language {foreign_card}") if foreign_card["multiverse_ids"]: card_foreign_entry.multiverse_id = 
foreign_card["multiverse_ids"][0] if "card_faces" in foreign_card: if card_name.lower() == foreign_card["name"].split("/")[0].strip().lower(): face = 0 else: face = 1 LOGGER.debug(f"Split card found: Using face {face} for {card_name}") card_foreign_entry.name = " // ".join( [ face_data.get("printed_name", face_data.get("name", "")) for face_data in foreign_card["card_faces"] ] ) foreign_card = foreign_card["card_faces"][face] card_foreign_entry.face_name = foreign_card.get("printed_name") if not card_foreign_entry.face_name: LOGGER.warning(f"Unable to resolve name for {foreign_card}") card_foreign_entry.face_name = foreign_card.get("name") if not card_foreign_entry.name: card_foreign_entry.name = foreign_card.get("printed_name") card_foreign_entry.text = foreign_card.get("printed_text") card_foreign_entry.flavor_text = foreign_card.get("flavor_text") card_foreign_entry.type = foreign_card.get("printed_type_line") if card_foreign_entry.name: card_foreign_entries.append(card_foreign_entry) return card_foreign_entries def parse_card_types(card_type: str) -> Tuple[List[str], List[str], List[str]]: """ Given a card type string, split it up into its raw components: super, sub, and type :param card_type: Card type string to parse :return: Tuple (super, type, sub) of the card's attributes """ sub_types: List[str] = [] super_types: List[str] = [] types: List[str] = [] supertypes_and_types: str if "—" not in card_type: supertypes_and_types = card_type else: split_type: List[str] = card_type.split("—") supertypes_and_types = split_type[0] subtypes: str = split_type[1] # Planes are an entire sub-type, whereas normal cards # are split by spaces if card_type.startswith("Plane"): sub_types = [subtypes.strip()] else: sub_types = [x.strip() for x in subtypes.split() if x] for value in supertypes_and_types.split(): if value in SUPER_TYPES: super_types.append(value) elif value: types.append(value) return super_types, types, sub_types def get_card_colors(mana_cost: str) -> List[str]: 
""" For some cards, we may have to manually determine the card's color. :param mana_cost: Mana cost string :return: Colors based on mana cost """ color_options: List[str] = ["W", "U", "B", "R", "G"] ret_val = [] for color in color_options: if color in mana_cost: ret_val.append(color) return ret_val def get_scryfall_set_data(set_code: str) -> Optional[Dict[str, Any]]: """ Get a Scryfall set header for a specific set :param set_code: Set to grab header for :return: Set header, if it exists """ set_data: Dict[str, Any] = ScryfallProvider().download( ScryfallProvider().ALL_SETS_URL + set_code ) if set_data["object"] == "error": LOGGER.error(f"Failed to download {set_code}") return None return set_data def is_number(string: str) -> bool: """See if a given string is a number (int or float)""" try: float(string) return True except ValueError: pass try: unicodedata.numeric(string) return True except (TypeError, ValueError): pass return False def get_card_cmc(mana_cost: str) -> float: """ For some cards, we may have to manually update the converted mana cost. We do this by reading the inner components of each pair of {} and deciphering what the contents mean. If number, we're good. Otherwise +1. 
:param mana_cost: Mana cost string :return: One sided cmc """ total: float = 0 symbol: List[str] = re.findall(r"{([^{]*)}", mana_cost.strip()) for element in symbol: # Address 2/W, G/W, etc as "higher" cost always first if "/" in element: element = element.split("/")[0] if is_number(element): total += float(element) elif element in ["X", "Y", "Z"]: # Placeholder mana continue elif element[0] == "H": # Half mana total += 0.5 else: total += 1 return total def parse_printings(sf_prints_url: Optional[str]) -> List[str]: """ Given a Scryfall printings URL, extract all sets a card was printed in :param sf_prints_url: URL to extract data from :return: List of all sets a specific card was printed in """ card_sets: Set[str] = set() while sf_prints_url: prints_api_json: Dict[str, Any] = ScryfallProvider().download(sf_prints_url) if prints_api_json["object"] == "error": LOGGER.error(f"Bad download: {sf_prints_url}") break for card in prints_api_json["data"]: card_sets.add(card.get("set").upper()) if not prints_api_json.get("has_more"): break sf_prints_url = prints_api_json.get("next_page") return sorted(list(card_sets)) def parse_legalities(sf_card_legalities: Dict[str, str]) -> MtgjsonLegalitiesObject: """ Given a Scryfall legalities dictionary, convert it to MTGJSON format :param sf_card_legalities: Scryfall legalities :return: MTGJSON legalities """ card_legalities = MtgjsonLegalitiesObject() for key, value in sf_card_legalities.items(): if value != "not_legal": setattr(card_legalities, key.lower(), value.capitalize()) return card_legalities def parse_rulings(rulings_url: str) -> List[MtgjsonRulingObject]: """ Get the JSON data from Scryfall and convert it to MTGJSON format for rulings :param rulings_url: URL to get Scryfall JSON data from :return: MTGJSON rulings list """ rules_api_json: Dict[str, Any] = ScryfallProvider().download(rulings_url) if rules_api_json["object"] == "error": LOGGER.error(f"Error downloading URL {rulings_url}: {rules_api_json}") return [] 
mtgjson_rules: List[MtgjsonRulingObject] = [] for sf_rule in rules_api_json["data"]: mtgjson_rule = MtgjsonRulingObject(sf_rule["published_at"], sf_rule["comment"]) mtgjson_rules.append(mtgjson_rule) return sorted(mtgjson_rules, key=lambda ruling: ruling.date) def relocate_miscellaneous_tokens(mtgjson_set: MtgjsonSetObject) -> None: """ Sometimes tokens find their way into the main set. This will remove them from the cards array and sets an internal market to be dealt with later down the line :param mtgjson_set: MTGJSON Set object """ LOGGER.info(f"Relocate tokens for {mtgjson_set.code}") token_types = {"token", "double_faced_token", "emblem", "art_series"} # Identify unique tokens from cards tokens_found = { card.identifiers.scryfall_id for card in mtgjson_set.cards if card.layout in token_types and card.identifiers.scryfall_id } # Remove tokens from cards mtgjson_set.cards[:] = ( card for card in mtgjson_set.cards if card.layout not in token_types ) # Scryfall objects to handle later mtgjson_set.extra_tokens = [ ScryfallProvider().download(ScryfallProvider().CARDS_URL + scryfall_id) for scryfall_id in tokens_found ] LOGGER.info(f"Finished relocating tokens for {mtgjson_set.code}") def mark_duel_decks(set_code: str, mtgjson_cards: List[MtgjsonCardObject]) -> None: """ For Duel Decks, we need to determine which "deck" the card can be found in. This is a convoluted, but correct, approach at solving that problem. 
:param set_code: Set to work on :param mtgjson_cards: Card Objects """ LOGGER.info(f"Marking duel deck status for {set_code}") if set_code.startswith("DD") or set_code in {"GS1"}: land_pile_marked = False side_letter_as_number = ord("a") for card in sorted(mtgjson_cards): if card.name in BASIC_LAND_NAMES: land_pile_marked = True elif any(_type in card.type for _type in ("Token", "Emblem")): continue elif land_pile_marked: side_letter_as_number += 1 land_pile_marked = False card.duel_deck = chr(side_letter_as_number) LOGGER.info(f"Finished marking duel deck status for {set_code}") def build_mtgjson_set(set_code: str) -> Optional[MtgjsonSetObject]: """ Construct a MTGJSON Magic Set :param set_code: Set to construct :return: Set object """ # Output Object mtgjson_set = MtgjsonSetObject() # Ensure we have a header for this set set_data = get_scryfall_set_data(set_code) if not set_data: return None # Explicit Variables mtgjson_set.name = set_data["name"].strip() mtgjson_set.code = set_data["code"].upper() mtgjson_set.type = set_data["set_type"] mtgjson_set.keyrune_code = pathlib.Path(set_data["icon_svg_uri"]).stem.upper() mtgjson_set.release_date = set_data["released_at"] mtgjson_set.mtgo_code = set_data.get("mtgo_code", "").upper() mtgjson_set.parent_code = set_data.get("parent_set_code", "").upper() mtgjson_set.block = set_data.get("block", "") mtgjson_set.is_online_only = set_data.get("digital", "") mtgjson_set.is_foil_only = set_data.get("foil_only", "") mtgjson_set.is_non_foil_only = set_data.get("nonfoil_only", "") mtgjson_set.search_uri = set_data["search_uri"] mtgjson_set.mcm_name = CardMarketProvider().get_set_name(mtgjson_set.name) mtgjson_set.mcm_id = CardMarketProvider().get_set_id(mtgjson_set.name) mtgjson_set.translations = WizardsProvider().get_translation_for_set( mtgjson_set.code ) base_total_sizes = get_base_and_total_set_sizes(set_code) mtgjson_set.base_set_size = base_total_sizes[0] mtgjson_set.total_set_size = base_total_sizes[1] # Building cards is 
a process
    mtgjson_set.cards = build_base_mtgjson_cards(set_code)
    add_is_starter_option(set_code, mtgjson_set.search_uri, mtgjson_set.cards)
    relocate_miscellaneous_tokens(mtgjson_set)
    add_variations_and_alternative_fields(mtgjson_set)
    add_mcm_details(mtgjson_set)
    add_card_kingdom_details(mtgjson_set)

    # Build tokens, a little less of a process
    mtgjson_set.tokens = build_base_mtgjson_tokens(
        f"T{set_code}", mtgjson_set.extra_tokens or []
    )

    mtgjson_set.tcgplayer_group_id = set_data.get("tcgplayer_id")
    mtgjson_set.booster = GitHubBoostersProvider().get_set_booster_data(set_code)

    mark_duel_decks(set_code, mtgjson_set.cards)

    # Implicit Variables
    mtgjson_set.is_foreign_only = mtgjson_set.code in FOREIGN_SETS
    # A set whose release date is still ahead of the build date is only
    # partially spoiled
    mtgjson_set.is_partial_preview = MtgjsonMetaObject().date < mtgjson_set.release_date

    return mtgjson_set


def build_base_mtgjson_tokens(
    set_code: str, added_tokens: List[Dict[str, Any]]
) -> List[MtgjsonCardObject]:
    """
    Construct all tokens in MTGJSON format from a single set
    :param set_code: Set to build (token sets carry a "T" prefix)
    :param added_tokens: Additional tokens to build
    :return: Completed card objects
    """
    return build_base_mtgjson_cards(set_code, added_tokens, True)


def build_base_mtgjson_cards(
    set_code: str,
    additional_cards: Optional[List[Dict[str, Any]]] = None,
    is_token: bool = False,
) -> List[MtgjsonCardObject]:
    """
    Construct all cards in MTGJSON format from a single set
    :param set_code: Set to build
    :param additional_cards: Additional objs to build (not relevant for normal builds)
    :param is_token: Are tokens being compiled?
:return: Completed card objects """ LOGGER.info(f"Building cards for {set_code}") cards = ScryfallProvider().download_cards(set_code) cards.extend(additional_cards or []) mtgjson_cards = parallel_call( build_mtgjson_card, cards, fold_list=True, repeatable_args=(0, is_token) ) LOGGER.info(f"Finished building cards for {set_code}") return list(mtgjson_cards) def add_is_starter_option( set_code: str, search_url: str, mtgjson_cards: List[MtgjsonCardObject] ) -> None: """ There are cards that may not exist in standard boosters. As such, we mark those as starter cards. :param set_code: Set to handle :param search_url: URL to search for cards in :param mtgjson_cards: Card Objects to modify """ LOGGER.info(f"Add starter data to {set_code}") starter_card_url = search_url.replace("&unique=", "++not:booster&unique=") starter_cards = ScryfallProvider().download(starter_card_url) if starter_cards["object"] == "error": LOGGER.debug(f"All cards in {set_code} are available in boosters") LOGGER.info(f"Finished adding starter data to {set_code}") return for scryfall_object in starter_cards["data"]: mtgjson_cards_with_same_id = [ item for item in mtgjson_cards if item.identifiers.scryfall_id == scryfall_object["id"] ] for card in mtgjson_cards_with_same_id: card.is_starter = True LOGGER.info(f"Finished adding starter data to {set_code}") def add_leadership_skills(mtgjson_card: MtgjsonCardObject) -> None: """ Determine if a card is able to be your commander, and if so which format(s). 
:param mtgjson_card: Card object """ is_commander_legal = ( "Legendary" in mtgjson_card.type and "Creature" in mtgjson_card.type # Exclude Flip cards and mtgjson_card.type not in {"flip"} # Exclude Melded cards and backside of Transform cards and (mtgjson_card.side == "a" if mtgjson_card.side else True) ) or ("can be your commander" in mtgjson_card.text) is_oathbreaker_legal = "Planeswalker" in mtgjson_card.type is_brawl_legal = mtgjson_card.set_code.upper() in WhatsInStandardProvider().set_codes and ( is_oathbreaker_legal or is_commander_legal ) if is_commander_legal or is_oathbreaker_legal or is_brawl_legal: mtgjson_card.leadership_skills = MtgjsonLeadershipSkillsObject( is_brawl_legal, is_commander_legal, is_oathbreaker_legal ) def add_uuid(mtgjson_card: MtgjsonCardObject) -> None: """ Construct a UUIDv5 for each MTGJSON card object This will also add UUIDv4 for legacy support :param mtgjson_card: Card object """ if {"Token", "Card"}.intersection(mtgjson_card.types): # Tokens have a special generation method id_source_v5 = ( mtgjson_card.name + (mtgjson_card.face_name or "") + "".join((mtgjson_card.colors or "")) + (mtgjson_card.power or "") + (mtgjson_card.toughness or "") + (mtgjson_card.side or "") + mtgjson_card.set_code[1:].lower() + (mtgjson_card.identifiers.scryfall_id or "") + (mtgjson_card.identifiers.scryfall_illustration_id or "") ) id_source_v4 = ( (mtgjson_card.face_name if mtgjson_card.face_name else mtgjson_card.name) + "".join((mtgjson_card.colors or "")) + (mtgjson_card.power or "") + (mtgjson_card.toughness or "") + (mtgjson_card.side or "") + mtgjson_card.set_code[1:].upper() + (mtgjson_card.identifiers.scryfall_id or "") ) else: # Normal cards only need a few pieces of data id_source_v5 = ( ScryfallProvider().get_class_id() + (mtgjson_card.identifiers.scryfall_id or "") + (mtgjson_card.identifiers.scryfall_illustration_id or "") + mtgjson_card.set_code.lower() + mtgjson_card.name + (mtgjson_card.face_name or "") ) id_source_v4 = ( "sf" + 
        (mtgjson_card.identifiers.scryfall_id or "")
        + (mtgjson_card.face_name if mtgjson_card.face_name else mtgjson_card.name)
    )

    mtgjson_card.uuid = str(uuid.uuid5(uuid.NAMESPACE_DNS, id_source_v5))
    mtgjson_card.identifiers.mtgjson_v4_id = str(
        uuid.uuid5(uuid.NAMESPACE_DNS, id_source_v4)
    )


def build_mtgjson_card(
    scryfall_object: Dict[str, Any], face_id: int = 0, is_token: bool = False
) -> List[MtgjsonCardObject]:
    """
    Construct a MTGJSON Card object from 3rd party entities
    :param scryfall_object: Scryfall Card Object
    :param face_id: What face to build for (set internally)
    :param is_token: Is this a token object? (some diff fields)
    :return: List of card objects that were constructed
    """
    LOGGER.info(f"Building {scryfall_object['set'].upper()}: {scryfall_object['name']}")

    # Return List
    mtgjson_cards = []

    # Object Container
    mtgjson_card = MtgjsonCardObject(is_token)

    mtgjson_card.name = scryfall_object["name"]
    mtgjson_card.flavor_name = scryfall_object.get("flavor_name")
    mtgjson_card.set_code = scryfall_object["set"].lower()
    mtgjson_card.identifiers.scryfall_id = scryfall_object["id"]
    mtgjson_card.identifiers.scryfall_oracle_id = scryfall_object["oracle_id"]

    # Handle atypical cards
    # face_data defaults to the whole card; multi-faced cards override it below
    face_data = scryfall_object
    if "card_faces" in scryfall_object:
        mtgjson_card.set_names(scryfall_object["name"].split("//"))

        # Override face_data from above
        face_data = scryfall_object["card_faces"][face_id]

        if "//" in scryfall_object.get("mana_cost", ""):
            mtgjson_card.colors = get_card_colors(
                scryfall_object["mana_cost"].split("//")[face_id]
            )
            mtgjson_card.face_converted_mana_cost = get_card_cmc(
                scryfall_object["mana_cost"].split("//")[face_id]
            )
        elif scryfall_object["layout"] in {
            "split",
            "transform",
            "aftermath",
            "adventure",
        }:
            mtgjson_card.face_converted_mana_cost = get_card_cmc(
                face_data.get("mana_cost", "0")
            )

        # Watermark always comes from the first face
        mtgjson_card.set_watermark(scryfall_object["card_faces"][0].get("watermark"))

        if scryfall_object["card_faces"][-1]["oracle_text"].startswith("Aftermath"):
            mtgjson_card.layout = "aftermath"

        mtgjson_card.artist = scryfall_object["card_faces"][face_id].get("artist", "")

        # Only the entry call (face 0) recurses, building one object per extra face
        if face_id == 0:
            for i in range(1, len(scryfall_object["card_faces"])):
                mtgjson_cards.extend(build_mtgjson_card(scryfall_object, i, is_token))

    # Start of single card builder
    if face_data.get("mana_cost"):
        mtgjson_card.mana_cost = face_data["mana_cost"]

    mtgjson_card.identifiers.scryfall_illustration_id = scryfall_object.get(
        "illustration_id", face_data.get("illustration_id")
    )

    if not mtgjson_card.colors:
        mtgjson_card.colors = (
            face_data["colors"]
            if "colors" in face_data.keys()
            else scryfall_object["colors"]
        )

    # Explicit Variables -- Based on the entire card object
    mtgjson_card.border_color = scryfall_object.get("border_color", "")
    mtgjson_card.color_identity = scryfall_object.get("color_identity", "")
    mtgjson_card.converted_mana_cost = scryfall_object.get("cmc", "")
    mtgjson_card.edhrec_rank = scryfall_object.get("edhrec_rank")
    mtgjson_card.frame_effects = scryfall_object.get("frame_effects", "")
    mtgjson_card.frame_version = scryfall_object.get("frame", "")
    mtgjson_card.hand = scryfall_object.get("hand_modifier")
    mtgjson_card.has_foil = scryfall_object.get("foil")
    mtgjson_card.has_non_foil = scryfall_object.get("nonfoil")
    mtgjson_card.has_content_warning = scryfall_object.get("content_warning")
    mtgjson_card.is_full_art = scryfall_object.get("full_art")
    mtgjson_card.is_online_only = scryfall_object.get("digital")
    mtgjson_card.is_oversized = scryfall_object.get("oversized")
    mtgjson_card.is_promo = scryfall_object.get("promo")
    mtgjson_card.is_reprint = scryfall_object.get("reprint")
    mtgjson_card.is_reserved = scryfall_object.get("reserved")
    mtgjson_card.is_story_spotlight = scryfall_object.get("story_spotlight")
    mtgjson_card.is_textless = scryfall_object.get("textless")
    mtgjson_card.life = scryfall_object.get("life_modifier")

    mtgjson_card.identifiers.mtg_arena_id = get_str_or_none(
        scryfall_object.get("arena_id")
    )
    mtgjson_card.identifiers.mtgo_id = get_str_or_none(scryfall_object.get("mtgo_id"))
    mtgjson_card.identifiers.mtgo_foil_id = get_str_or_none(
        scryfall_object.get("mtgo_foil_id")
    )
    mtgjson_card.number = scryfall_object.get("collector_number", "0")

    # Handle Promo Types for MTGJSON
    mtgjson_card.promo_types = scryfall_object.get("promo_types", [])
    if mtgjson_card.number.endswith("p"):
        mtgjson_card.promo_types.append("planeswalkerstamped")

    # Remove terms that are covered elsewhere
    mtgjson_card.promo_types = [
        card_type
        for card_type in mtgjson_card.promo_types
        if card_type not in {"starterdeck", "planeswalkerdeck"}
    ]

    mtgjson_card.rarity = scryfall_object.get("rarity", "")
    if not mtgjson_card.artist:
        mtgjson_card.artist = scryfall_object.get("artist", "")
    if not mtgjson_card.layout:
        mtgjson_card.layout = scryfall_object.get("layout", "")
    if not mtgjson_card.watermark:
        mtgjson_card.set_watermark(face_data.get("watermark"))

    # Indicate if this component exists on the platform
    mtgjson_card.availability = MtgjsonGameFormatsObject()
    mtgjson_card.availability.arena = "arena" in scryfall_object.get("games", []) or (
        mtgjson_card.identifiers.mtg_arena_id is not None
    )
    mtgjson_card.availability.mtgo = "mtgo" in scryfall_object.get("games", []) or (
        mtgjson_card.identifiers.mtgo_id is not None
    )
    mtgjson_card.availability.paper = not mtgjson_card.is_online_only
    mtgjson_card.availability.shandalar = "astral" in scryfall_object.get("games", [])
    mtgjson_card.availability.dreamcast = "sega" in scryfall_object.get("games", [])

    # Explicit Variables -- Based on the face of the card
    mtgjson_card.loyalty = face_data.get("loyalty")

    # NFD-normalize then strip non-ASCII; only store ascii_name when it differs
    ascii_name = (
        unicodedata.normalize("NFD", mtgjson_card.name)
        .encode("ascii", "ignore")
        .decode()
    )
    if mtgjson_card.name != ascii_name:
        LOGGER.debug(f"Adding ascii name for {mtgjson_card.name} -> {ascii_name}")
        mtgjson_card.ascii_name = ascii_name

    mtgjson_card.power = face_data.get("power", "")
    mtgjson_card.text = face_data.get("oracle_text", "")
    mtgjson_card.toughness = face_data.get("toughness", "")
    mtgjson_card.type = face_data.get("type_line", "Card")

    # Explicit -- Depending on if card face has it or not
    mtgjson_card.flavor_text = (
        face_data.get("flavor_text")
        if face_data.get("flavor_text")
        else scryfall_object.get("flavor_text")
    )

    if "color_indicator" in face_data.keys():
        mtgjson_card.color_indicator = face_data["color_indicator"]
    elif "color_indicator" in scryfall_object.keys():
        mtgjson_card.color_indicator = scryfall_object["color_indicator"]

    if scryfall_object["multiverse_ids"]:
        if len(scryfall_object["multiverse_ids"]) > face_id:
            mtgjson_card.identifiers.multiverse_id = get_str_or_none(
                scryfall_object["multiverse_ids"][face_id]
            )
        else:
            # Fewer multiverse IDs than faces: fall back to the first ID
            mtgjson_card.identifiers.multiverse_id = get_str_or_none(
                scryfall_object["multiverse_ids"][0]
            )

    # Add "side" for split cards (cards with exactly 2 sides)
    # Also set face name
    if mtgjson_card.get_names():
        mtgjson_card.face_name = str(face_data["name"])
        if mtgjson_card.layout not in ["meld"]:
            # Fix #632 as there are very limited distinguishing attributes
            if mtgjson_card.set_code == "tust":
                mtgjson_card.side = "a" if mtgjson_card.type != "Token" else "b"
            else:
                # chr(97) = 'a', chr(98) = 'b', ...
                mtgjson_card.side = chr(
                    mtgjson_card.get_names().index(mtgjson_card.face_name) + 97
                )

    # Implicit Variables
    mtgjson_card.is_timeshifted = (
        scryfall_object.get("frame") == "future" or mtgjson_card.set_code == "tsb"
    )
    mtgjson_card.printings = parse_printings(
        scryfall_object["prints_search_uri"].replace("%22", "")
    )
    mtgjson_card.legalities = parse_legalities(scryfall_object["legalities"])
    mtgjson_card.rulings = parse_rulings(scryfall_object["rulings_uri"])

    card_types = parse_card_types(mtgjson_card.type)
    mtgjson_card.supertypes = card_types[0]
    mtgjson_card.types = card_types[1]
    mtgjson_card.subtypes = card_types[2]

    if "Planeswalker" in mtgjson_card.types:
        # Wrap loyalty-ability costs ("+1:", "-2:", "X:") in square brackets;
        # the pattern covers both ASCII '-' and the true minus sign U+2212
        mtgjson_card.text = re.sub(r"([+−-]?[0-9X]+):", r"[\1]:", mtgjson_card.text)

    # Keywords have to be split up on our end for individual card faces
    mtgjson_card.keywords = [
        keyword
        for keyword in sorted(scryfall_object.get("keywords", []))
        if keyword.lower() in mtgjson_card.text.lower()
    ]

    # Handle Meld components, as well as tokens
    if "all_parts" in scryfall_object.keys():
        meld_object = []
        mtgjson_card.set_names(None)
        for a_part in scryfall_object["all_parts"]:
            if a_part["component"] != "token":
                if "//" in a_part.get("name"):
                    mtgjson_card.set_names(a_part.get("name").split("//"))
                    break

                # This is a meld only-fix, so we ignore tokens/combo pieces
                if "meld" in a_part["component"]:
                    meld_object.append(a_part["component"])
                mtgjson_card.append_names(a_part.get("name"))

        # If the only entry is the original card, empty the names array
        if (
            mtgjson_card.get_names()
            and len(mtgjson_card.get_names()) == 1
            and mtgjson_card.name in mtgjson_card.get_names()
        ):
            mtgjson_card.set_names(None)

        # Meld cards should be CardA, Meld, CardB.
        if (
            len(meld_object) == 3
            and meld_object[1] != "meld_result"
            and mtgjson_card.get_names()
        ):
            mtgjson_card.set_names(
                [
                    mtgjson_card.get_names()[0],
                    mtgjson_card.get_names()[2],
                    mtgjson_card.get_names()[1],
                ]
            )

    # Meld Object
    if mtgjson_card.get_names() and len(mtgjson_card.get_names()) == 3:
        # Front Sides will become Front1//Back, Front2//Back
        # Back Side will just be Back
        if mtgjson_card.name != mtgjson_card.get_names()[1]:
            mtgjson_card.side = "a"
            mtgjson_card.face_name = mtgjson_card.name
            mtgjson_card.name = (
                f"{mtgjson_card.name} // {mtgjson_card.get_names()[1]}"
            )
        else:
            mtgjson_card.face_name = mtgjson_card.name
            mtgjson_card.side = "b"

    mtgjson_card.foreign_data = parse_foreign(
        scryfall_object["prints_search_uri"].replace("%22", ""),
        mtgjson_card.face_name if mtgjson_card.face_name else mtgjson_card.name,
        mtgjson_card.number,
        mtgjson_card.set_code,
    )

    if mtgjson_card.name in ScryfallProvider().cards_without_limits:
        mtgjson_card.has_alternative_deck_limit = True

    add_uuid(mtgjson_card)
    add_leadership_skills(mtgjson_card)

    # Add purchase URL components after UUIDs are finalized
    mtgjson_card.raw_purchase_urls.update(scryfall_object.get("purchase_uris", {}))
    if "tcgplayer_id" in scryfall_object:
        mtgjson_card.identifiers.tcgplayer_product_id = str(
            scryfall_object["tcgplayer_id"]
        )
        mtgjson_card.purchase_urls.tcgplayer = url_keygen(
            mtgjson_card.identifiers.tcgplayer_product_id + mtgjson_card.uuid
        )

    if is_token:
        # Tokens list every other part of the card as "reverse related"
        reverse_related: List[str] = []
        if "all_parts" in scryfall_object:
            for a_part in scryfall_object["all_parts"]:
                if a_part.get("name") != mtgjson_card.name:
                    reverse_related.append(a_part.get("name"))
        mtgjson_card.reverse_related = reverse_related

    # Gatherer Calls -- SLOWWWWW
    if mtgjson_card.identifiers.multiverse_id:
        gatherer_cards = GathererProvider().get_cards(
            mtgjson_card.identifiers.multiverse_id, mtgjson_card.set_code
        )
        if len(gatherer_cards) > face_id:
            mtgjson_card.original_type = gatherer_cards[face_id].original_types
            mtgjson_card.original_text = gatherer_cards[face_id].original_text

    mtgjson_cards.append(mtgjson_card)

    return mtgjson_cards


def add_variations_and_alternative_fields(mtgjson_set: MtgjsonSetObject) -> None:
    """
    Set the variations, other_face_ids, and is_alternative
    statuses for all cards within the set object
    :param mtgjson_set: MTGJSON Set Object to modify
    """
    if not mtgjson_set.cards:
        return

    LOGGER.info(f"Adding variations for {mtgjson_set.code}")
    for this_card in mtgjson_set.cards:
        # Adds other face ID list
        if this_card.get_names():
            this_card.other_face_ids = []
            for other_card in mtgjson_set.cards:
                if other_card.face_name not in this_card.get_names():
                    continue
                if other_card.uuid == this_card.uuid:
                    continue
                if this_card.layout == "meld":
                    # Meld cards should account for the other sides
                    if this_card.side != other_card.side:
                        this_card.other_face_ids.append(other_card.uuid)
                elif other_card.number:
                    # Most split cards should have the same number
                    if other_card.number == this_card.number:
                        this_card.other_face_ids.append(other_card.uuid)
                else:
                    # No number? No problem, just add it!
                    this_card.other_face_ids.append(other_card.uuid)

        # Adds variations
        variations = [
            item.uuid
            for item in mtgjson_set.cards
            if item.name.split(" (")[0] == this_card.name.split(" (")[0]
            and item.face_name == this_card.face_name
            and item.uuid != this_card.uuid
            and (item.number != this_card.number if item.number else True)
        ]
        if variations:
            this_card.variations = variations

        # Add alternative tag
        # Ignore singleton printings in set, as well as basics
        if not variations or this_card.name in BASIC_LAND_NAMES:
            continue

        # Some hardcoded checking due to inconsistencies upstream
        if mtgjson_set.code.upper() in ["UNH", "10E"]:
            # Check for duplicates, mark the foils
            if (
                len(variations) >= 1
                and this_card.has_foil
                and not this_card.has_non_foil
            ):
                this_card.is_alternative = True
        elif mtgjson_set.code.upper() in ["CN2", "BBD"]:
            # Check for set number > set size
            # chr(9733) is the star character sometimes embedded in numbers
            if int(this_card.number.replace(chr(9733), "")) > mtgjson_set.base_set_size:
                this_card.is_alternative = True
        else:
            # Check for a star in the number
            if chr(9733) in this_card.number:
                this_card.is_alternative = True
    LOGGER.info(f"Finished adding variations for {mtgjson_set.code}")


def add_card_kingdom_details(mtgjson_set: MtgjsonSetObject) -> None:
    """
    Add the CardKingdom components, like IDs and purchase URLs
    :param mtgjson_set: MTGJSON Set
    """
    LOGGER.info(f"Adding CK details for {mtgjson_set.code}")
    translation_table = MTGBanProvider().get_mtgjson_to_card_kingdom()
    for mtgjson_card in mtgjson_set.cards:
        if mtgjson_card.uuid not in translation_table:
            continue

        entry = translation_table[mtgjson_card.uuid]

        if "normal" in entry:
            mtgjson_card.identifiers.card_kingdom_id = str(entry["normal"]["id"])
            mtgjson_card.purchase_urls.card_kingdom = url_keygen(
                entry["normal"]["url"] + mtgjson_card.uuid
            )
            mtgjson_card.raw_purchase_urls.update(
                {"cardKingdom": entry["normal"]["url"] + consts.CARD_KINGDOM_REFERRAL}
            )

        if "foil" in entry:
            mtgjson_card.identifiers.card_kingdom_foil_id = str(entry["foil"]["id"])
            mtgjson_card.purchase_urls.card_kingdom_foil = url_keygen(
                entry["foil"]["url"] + mtgjson_card.uuid
            )
            mtgjson_card.raw_purchase_urls.update(
                {"cardKingdomFoil": entry["foil"]["url"] + consts.CARD_KINGDOM_REFERRAL}
            )
    LOGGER.info(f"Finished adding CK details for {mtgjson_set.code}")


def add_mcm_details(mtgjson_set: MtgjsonSetObject) -> None:
    """
    Add the MKM components to a set's cards and tokens
    :param mtgjson_set: MTGJSON Set
    """
    LOGGER.info(f"Adding MCM details for {mtgjson_set.code}")
    mkm_cards = CardMarketProvider().get_mkm_cards(mtgjson_set.mcm_id)
    for mtgjson_card in mtgjson_set.cards:
        delete_key = False
        # There are multiple ways MKM represents cards...
        if mtgjson_card.name.lower() in mkm_cards.keys():
            # First lets see if the card name is found
            card_key = mtgjson_card.name.lower()
        elif (
            mtgjson_card.face_name
            and mtgjson_card.face_name.lower() in mkm_cards.keys()
        ):
            # If that failed, lets see if the face name is found
            card_key = mtgjson_card.face_name.lower()
        elif mtgjson_card.name.replace("//", "/").lower() in mkm_cards.keys():
            # Finally, lets check if they used a single slash for split-type cards
            card_key = mtgjson_card.name.replace("//", "/").lower()
        else:
            # Multiple printings of a card in the set... just guess at this point
            card_key = ""
            for mkm_card in mkm_cards:
                if mkm_card.startswith(mtgjson_card.name.lower()):
                    card_key = mkm_card
                    # Consume this entry so the next printing matches a different one
                    delete_key = True
                    break

        if not card_key:
            LOGGER.debug(f"Failed to find {mtgjson_card.name} for MKM")
            continue

        mkm_obj = mkm_cards[card_key]
        if delete_key:
            del mkm_cards[card_key]

        mtgjson_card.identifiers.mcm_id = str(mkm_obj["idProduct"])
        mtgjson_card.identifiers.mcm_meta_id = str(mkm_obj["idMetaproduct"])

        mtgjson_card.purchase_urls.cardmarket = url_keygen(
            mtgjson_card.identifiers.mcm_id
            + mtgjson_card.uuid
            + CARD_MARKET_BUFFER
            + mtgjson_card.identifiers.mcm_meta_id
        )
    LOGGER.info(f"Finished adding MCM details for {mtgjson_set.code}")


def get_base_and_total_set_sizes(set_code: str) -> Tuple[int, int]:
    """
    Get the size of a set from scryfall or corrections file
    :param set_code: Set code, upper case
    :return: Amount of cards in set (base, total)
    """
    # Load cache if not loaded
    with RESOURCE_PATH.joinpath("base_set_sizes.json").open(encoding="utf-8") as f:
        base_set_size_override = json.load(f)

    if set_code in base_set_size_override.keys():
        # Manual correction
        base_set_size = int(base_set_size_override[set_code])
    else:
        # Download on the fly
        base_set_size_download = ScryfallProvider().download(
            ScryfallProvider().CARDS_IN_BASE_SET_URL.format(set_code)
        )

        # Wasn't able to determine, so use all cards instead
        if base_set_size_download["object"] == "error":
            base_set_size_download = ScryfallProvider().download(
                ScryfallProvider().CARDS_IN_SET.format(set_code)
            )

        base_set_size = int(base_set_size_download.get("total_cards", 0))

    total_set_size = len(ScryfallProvider().download_cards(set_code))

    return base_set_size, total_set_size
from tkinter import filedialog
from tkinter import *
import copy
import shutil
import sys
import os
import locg
import comicvine
import comicutil
import archiveutil
import imageutil
import config
import comixology
import comicdb
from comicapi import comicinfoxml, filenameparser

SETTINGS = config.get_config()
REMOVE_SCENE_PROMO = SETTINGS["remove_scene_promo"]
REMOVE_SUBFOLDERS = SETTINGS["remove_subfolders"]
REMOVE_COMIXOLOGY_META = SETTINGS["remove_comixology_meta"]
LIBRARY_LOCATION = SETTINGS["library_location"]
KEEP_ORIGINAL = SETTINGS["keep_original"]
COMPARE_COVERS = SETTINGS["compare_covers"]
PDF_ZOOM = SETTINGS['from_pdf_zoom']
ARCHIVE_TYPE = SETTINGS['archive_type']
METADATA_TYPE = SETTINGS['metadata_type']
IMAGE_TYPE = SETTINGS['image_type']
RENAME_TEMPLATE = SETTINGS['rename_template']
COMIC_DATABASE = SETTINGS["comic_database"]
WRITE_METADATA = SETTINGS["write_metadata"] # true, false, overwrite, merge_existing, merge_new


def screen_clear():
    """Clear the terminal window (cross-platform)."""
    # for mac and linux(here, os.name is 'posix')
    if os.name == 'posix':
        _ = os.system('clear')
    else:
        # for windows platfrom
        _ = os.system('cls')


def move_to_library(file, details):
    """Rename *file* per RENAME_TEMPLATE using *details* (a comicdb issue dict)
    and move it into LIBRARY_LOCATION, creating directories as needed."""
    extension = os.path.splitext(file)[1]
    new_name = copy.copy(RENAME_TEMPLATE)
    seperator = os.path.sep
    publisher = details["publisher"]
    series = details["volumeName"]
    issue_name = details["issueName"]
    issue_year = details["storeDate"].split('-')[0]
    series_year = details["volumeYear"]
    # Fall back to the issue's store-date year when the series year is unusable
    if series_year == "UNKNOWN" or series_year == "PRESENT" or series_year == None or series_year == "":
        series_year = issue_year
    issue = details['issueNumber']
    new_name = new_name.format(
        seperator=seperator,
        publisher=publisher,
        series=series,
        series_year=series_year,
        issue_year=issue_year,
        issue_pad2=comicutil.pad_to_length(issue, 2),
        issue_pad3=comicutil.pad_to_length(issue, 3),
        issue_name=issue_name,
    )
    new_name = os.path.join(LIBRARY_LOCATION, f"{new_name}{extension}")
    new_name = comicutil.stripBadChars(new_name)
    os.makedirs(os.path.dirname(new_name), exist_ok=True)
    shutil.move(file, new_name)
    print(f"Comic copied to {new_name}")


def convert_to(oldfile, newfile, metadata=None, image_type=IMAGE_TYPE):
    """Extract *oldfile*, apply the configured cleanups (promo removal,
    comixology meta removal, image conversion, subfolder flattening, metadata
    writing) and repack as *newfile*.

    :param metadata: comicdb issue dict to embed, or None to skip tagging
    :param image_type: target image format for page conversion
    :return: truthy result of archiveutil.dir_to_archive on success, False on error
    """
    try:
        tmp = ""
        try:
            tmp = archiveutil.extract_to_temp(oldfile, PDF_ZOOM)
        except Exception as e:
            print("Extract error: ", e)
            shutil.rmtree(tmp)
            return False
        if REMOVE_SCENE_PROMO:
            try:
                comicutil.remove_promos_from_dir(tmp)
            except Exception as e:
                print(f"Error removing promos: {e}")
                shutil.rmtree(tmp)
                return False
        if REMOVE_COMIXOLOGY_META:
            try:
                comicutil.remove_comixology_meta_from_dir(tmp)
            except Exception as e:
                print(f"Error removing promos: {e}")
                shutil.rmtree(tmp)
                return False
        if IMAGE_TYPE != "" and IMAGE_TYPE != None:
            imageutil.convert_dir_to_type(tmp, image_type)
        if REMOVE_SUBFOLDERS:
            archiveutil.remove_subfolders(tmp)
        if metadata is not None:
            try:
                meta = comicutil.comicdb_to_meta(metadata)
                metas = comicutil.get_meta_from_dir(tmp)
                # NOTE(review): the loop variable is unused, so the write below
                # runs once per entry of metadata_files — looks unintended; confirm
                for m in comicutil.metadata_files:
                    if metas[METADATA_TYPE] == "" and WRITE_METADATA:
                        comicutil.write_meta_to_dir(meta, tmp, METADATA_TYPE)
                    elif metas[METADATA_TYPE] != "" and WRITE_METADATA == "overwrite":
                        comicutil.write_meta_to_dir(meta, tmp, METADATA_TYPE)
                    elif metas[METADATA_TYPE] != "" and WRITE_METADATA:
                        # Merge the existing and the fresh metadata XML
                        if METADATA_TYPE == "ComicInfo.xml":
                            xml1 = comicinfoxml.ComicInfoXml().stringFromMetadata(metas[METADATA_TYPE])
                            xml2 = comicinfoxml.ComicInfoXml().stringFromMetadata(meta)
                            xml3 = ""
                            if WRITE_METADATA == "merge_new":
                                xml3 = comicutil.merge_meta_xml(xml1, xml2, "xml1")
                            if WRITE_METADATA == "merge_existing":
                                xml3 = comicutil.merge_meta_xml(xml1, xml2, "xml2")
                            new_meta = comicinfoxml.ComicInfoXml().metadataFromString(xml3)
                            comicutil.write_meta_to_dir(new_meta, tmp, METADATA_TYPE)
            except Exception as e:
                print(f"Failed to write metadata to directory: {repr(e)}")
        try:
            return archiveutil.dir_to_archive(tmp, newfile, metadata)
        except Exception as e:
            print("Archive error: ", e)
            shutil.rmtree(tmp)
            return False
    except Exception as e:
        print(f"Convert error: {repr(e)}")
        return False


def file_or_folder():
    """Prompt for file-vs-folder mode and open the matching tk dialog.
    Returns the selected path, or None/'' when the user cancels."""
    print("--------------------------")
    print("File or Folder? Default: 1")
    print("--------------------------")
    print("1: File")
    print("2: Folder")
    print("--------------------------")
    val = input("") or "1"
    if val == "":
        return
    if val == "1":
        Tk().withdraw()
        return filedialog.askopenfilename(initialdir = "/", title = "Select file")
    if val == "2":
        Tk().withdraw()
        return filedialog.askdirectory(initialdir="/", title='Select directory')


def convert_to_archive_type():
    """Interactive menu: convert one archive (or every archive under a folder)
    to the chosen archive container format."""
    print("--------------------------")
    print("Convert to New Format")
    print("Default 1")
    print("--------------------------")
    print(f"1: Convert to Prefered Type: {ARCHIVE_TYPE.upper()}")
    print("2: Convert to CBZ")
    print("3: Convert to CBR")
    print("4: Convert to PDF")
    print("5: Convert to CB7")
    print("6: Convert to CBA")
    print("7: Convert to CBT")
    print("8: Convert to EPUB")
    print("--------------------------")
    val = input("") or "1"
    selected = file_or_folder()
    if selected == "" or selected == None:
        return
    file_types = {"1": ARCHIVE_TYPE.lower(), "2": "cbz", "3": "cbr", "4": "pdf", "5": "cb7", "6": "cba", "7": "cbt", "8": "epub"}
    if os.path.isdir(selected):
        for subdir, dirs, files in os.walk(selected):
            for file in files:
                if archiveutil.is_archive(file):
                    old_file = subdir + os.sep + file
                    base_file = os.path.splitext(old_file)[0]
                    new_file = f"{base_file}.{file_types[val]}"
                    converted = convert_to(old_file, new_file)
                    print(f"file coverted: {converted}")
                    # Remove the source only when conversion succeeded and produced a new file
                    if not KEEP_ORIGINAL and converted and old_file != new_file:
                        os.remove(old_file)
    else:
        if archiveutil.is_archive(selected):
            old_file = selected
            base_file = os.path.splitext(old_file)[0]
            new_file = f"{base_file}.{file_types[val]}"
            converted = convert_to(old_file, new_file)
            if not KEEP_ORIGINAL and converted and old_file != new_file:
                os.remove(old_file)


def convert_to_image_type():
    """Interactive menu: re-encode the page images inside an archive (or every
    archive under a folder) to the chosen image format."""
    print("--------------------------")
    print("Convert to New Format")
    print("Default 1")
    print("--------------------------")
    print(f"1: Convert to Prefered Type: {IMAGE_TYPE.upper()}")
    print("2: Convert to JPG")
    print("3: Convert to PNG")
    print("4: Convert to WEBP")
    print("5: Convert to BMP")
    print("6: Convert to GIF")
    print("7: Convert to TIFF")
    print("--------------------------")
    val = input("") or "1"
    selected = file_or_folder()
    if selected == "" or selected == None:
        return
    image_types = {"1": IMAGE_TYPE.lower(), "2": "jpg", "3": "png", "4": "webp", "5": "bmp", "6": "gif", "7": "tiff"}
    if os.path.isdir(selected):
        for subdir, dirs, files in os.walk(selected):
            for file in files:
                if archiveutil.is_archive(file):
                    file = subdir + os.sep + file
                    # BUGFIX: convert_to has no 'comicdb_info' parameter; the
                    # original keyword raised TypeError on every call
                    converted = convert_to(file, file, metadata=None, image_type=image_types[val])
                    print(f"Image files in {converted} converted to {image_types[val]}")
    else:
        if archiveutil.is_archive(selected):
            file = selected
            converted = convert_to(file, file, metadata=None, image_type=image_types[val])
            print(f"Image files in {converted} converted to {image_types[val]}")


def remove_scene_promos():
    """Interactive menu: strip scene-group promo pages from an archive or
    from every archive under a folder."""
    print("--------------------------")
    print("Remove Scene Promos")
    print("")
    selected = file_or_folder()
    if selected == "" or selected == None:
        return
    if os.path.isdir(selected):
        for subdir, dirs, files in os.walk(selected):
            for file in files:
                filepath = subdir + os.sep + file
                if archiveutil.is_archive(filepath):
                    comicutil.remove_promos_from_file(filepath, PDF_ZOOM)
    else:
        if archiveutil.is_archive(selected):
            comicutil.remove_promos_from_file(selected, PDF_ZOOM)


def set_database():
    """Interactive menu: select the metadata backend used by comicdb."""
    print("--------------------------")
    print("Set Comic Database")
    print("Default 1")
    print("--------------------------")
    print("1: Comicvine")
    print("2: League of Comic Geeks")
    print("3: Comixology")
    print("4: Back to Menu")
    print("--------------------------")
    val = input("") or "1"
    if val == "1":
        comicdb.set_database("comicvine")
        print("Comic Database set to Comicvine")
    if val == "2":
        comicdb.set_database("locg")
        print("Comic Database set to League of Comic Geeks")
    if val == "3":
        comicdb.set_database("comixology")
        print("Comic Database set to Comixology")
    if val == "4" or val.lower() == "q" or val == "quit" or val == "back":
        # NOTE(review): "back" silently resets the backend to comicvine — confirm intended
        comicdb.set_database("comicvine")


def tag_interactive(filename, results=None, issues=None):
    """Interactively match *filename* against the comic database, then convert,
    tag, and file it into the library.

    :param results: optional {"results": ..., "last_series": ...} carried over
        from a previous call so series confirmation can be skipped
    :param issues: optional cached issue list for the confirmed series
    :return: (results, issues, status) where status is "quit" when the user quits
    """
    last_series = None
    results = results
    issues = issues
    originalfilename = filename
    extracted = ""  # BUGFIX: initialize so the quit path can't hit NameError
    details = comicutil.get_comic_details(filename)
    query = f"{details.series}"
    if details.year == None or details.year == "":
        details.year = "0000"
    if results != None:
        last_series = results["last_series"]
        results = results["results"]
    if results == None:
        results = comicdb.search_series(query, 50, 70)
    print("-----------------------------------------------------------------")
    print(f"File Name: {originalfilename}")
    #print(f"Search Query: {query}")
    stop = False
    year_range = False
    for result in results:
        if not stop:
            # NOTE(review): year_range is never reset, so once one result passes
            # the year check, all later results do too — confirm intended
            if result["end_year"] == "PRESENT" or result["end_year"] == "UNKNOWN":
                year_range = True
            elif int(details.year) <= int(result["end_year"]):
                year_range = True
            if year_range:
                val = None
                if last_series != details.series:
                    # BUGFIX: outer f-string quotes changed to double quotes —
                    # nested same-quote f-strings are a SyntaxError before Python 3.12
                    print(f'---------------------- Seach Result -----------------------------')
                    print(f"Series Name: {result['name']}")
                    print(f"Year: {result['year']}")
                    print(f"Publisher: {result['publisher']}")
                    print(f"Issues: {result['issue_count']}")
                    print(f"Series Link: {result['link']}")
                    print("-----------------------------------------------------------------")
                    print(f"Name Match Confidence: {result['confidence']}")
                    print("-----------------------------------------------------------------")
                    val = input("Is this the right series? (y/n/q) default (y): ") or "y"
                else:
                    val = "y"
                if val.lower() == "y" or val.lower() == "yes":
                    val = "y"
                elif val.lower() == "n" or val.lower() == "no":
                    val = "n"
                elif val.lower() == "q" or val.lower() == "quit" or val.lower() == "exit":
                    val = "q"
                if val == "y":
                    if issues == None:
                        issues = comicdb.get_series_by_id(result["id"])
                    for issue in issues:
                        if issue['issueNumber'] == details.issue or details.issue == "":
                            if "storeDate" not in issue:
                                issue.update(comicdb.get_issue_by_id(issue["id"]))
                            extracted = ""
                            if COMPARE_COVERS:
                                extracted = archiveutil.extract_to_temp(filename)
                            #if details.year in issue["storeDate"]:
                            if True:
                                print("-----------------------------------------------------------------")
                                print(f"File Name: {originalfilename}")
                                print(f'---------------------- Seach Result -----------------------------')
                                print(f"Issue Name: {issue['issueName']}")
                                print(f"Store Date: {issue['storeDate']}")
                                print(f"Issue Link: {issue['issueLink']}")
                                print(f"Series Name: {result['name']}")
                                print(f"Series Year: {result['year']}")
                                print(f"Publisher: {result['publisher']}")
                                if COMPARE_COVERS:
                                    webcover = imageutil.getImageFromUrl(issue['coverImage'])
                                    cbcover = comicutil.get_cover_from_dir(extracted)
                                    cover_confidence = imageutil.compare_images2(webcover, cbcover)
                                    print(f"Cover Match Confidence: {cover_confidence}")
                                print("-----------------------------------------------------------------")
                                val = input("Rename with these details? (y/n/q) default (y): ") or "y"
                                if val.lower() == "y" or val.lower() == "yes":
                                    val = "y"
                                elif val.lower() == "n" or val.lower() == "no":
                                    val = "n"
                                elif val.lower() == "q" or val.lower() == "quit" or val.lower() == "exit":
                                    val = "q"
                                if val == "y":
                                    if "description" not in issue:
                                        issue.update(comicdb.get_issue_by_id(issue["id"]))
                                    file_folder = os.path.dirname(filename)
                                    issue_name = comicutil.stripBadChars(comicutil.remove_issue_number(issue["issueName"]))
                                    issue_year = issue["storeDate"].split("-")[0]
                                    #new_filename = f'{issue_name} #{comicutil.pad_to_length(issue["issueNumber"])} ({issue_year}).{ARCHIVE_TYPE.lower()}'
                                    new_filename = f"temp.{ARCHIVE_TYPE.lower()}"
                                    new_file = os.path.join(file_folder, new_filename)
                                    meta_details = copy.deepcopy(result)
                                    meta_details.update(issue)
                                    converted = convert_to(filename, new_file, meta_details)
                                    if not KEEP_ORIGINAL and converted and filename != new_file:
                                        os.remove(filename)
                                    move_to_library(new_file, meta_details)
                                    result["last_series"] = details.series
                                    results = {"results": results, "last_series": details.series}
                                    # (unreachable 'break' after this return removed)
                                    return results, issues, ""
                                if val == "n":
                                    pass
                                if val == "q":
                                    if os.path.isdir(extracted):
                                        shutil.rmtree(extracted)
                                    stop = True
                                    break
                if val == "q":
                    return results, issues, "quit"
                if val == "n":
                    pass
    try:
        if os.path.isdir(extracted):
            shutil.rmtree(extracted)
    except:
        pass
    results = {"results": results, "last_series": details.series}
    return results, issues, ""


def search_and_tag_interactive():
    """Pick a file or folder and run tag_interactive over every archive found,
    carrying series search results between files of the same series."""
    print("--------------------------")
    print("Get Details and Organize")
    print("")
    selected = file_or_folder()
    if selected == "" or selected == None:
        return
    if os.path.isdir(selected):
        results = None
        issues = None
        for subdir, dirs, files in os.walk(selected):
            for file in files:
                filepath = subdir + os.sep + file
                if archiveutil.is_archive(filepath):
                    details = comicutil.get_comic_details(filepath)
                    # Drop the cache when the series changes between files
                    if results is not None:
                        if results["last_series"] != details.series:
                            results = None
                            issues = None
                    r, i, q = tag_interactive(filepath, results, issues)
                    results = r
                    issues = i
                    if q == "quit":
                        break
    else:
        if archiveutil.is_archive(selected):
            tag_interactive(selected)


def main_menu():
    """Print the top-level menu and dispatch the chosen action."""
    print("============================================================================")
    print("What would like to do? default: 1")
    print("")
    print("1: Tag and organize.")
    print("2: Convert archive type.")
    print("3: Convert image type.")
    print("4: Remove scene promos.")
    print("5: Change Comic Database.")
    print("6: List Supported Archive Types.")
    print("7: Quit.")
    print("============================================================================")
    val = input("") or "1"
    if val == "1":
        search_and_tag_interactive()
    if val == "2":
        convert_to_archive_type()
    if val == "3":
        convert_to_image_type()
    if val == "4":
        remove_scene_promos()
    if val == "5":
        set_database()
    if val == "6":
        archiveutil.list_supported_formats()
    if val == "7" or val.lower() == "q" or val.lower() == "quit" or val.lower() == "exit":
        sys.exit()


if __name__ == "__main__":
    while True:
        main_menu()
"""Interactive CLI for tagging, renaming and converting comic-book archives.

Menu-driven tool: looks comics up in a configurable comic database
(Comicvine / League of Comic Geeks / Comixology), writes metadata into the
archive, converts between archive/image formats, strips scene promos, and
files the result into the configured library location.
"""
from tkinter import filedialog
from tkinter import *
import copy
import shutil
import sys
import os
import locg
import comicvine
import comicutil
import archiveutil
import imageutil
import config
import comixology
import comicdb
from comicapi import comicinfoxml, filenameparser

# All behavior toggles come from the user's config file.
SETTINGS = config.get_config()
REMOVE_SCENE_PROMO = SETTINGS["remove_scene_promo"]
REMOVE_SUBFOLDERS = SETTINGS["remove_subfolders"]
REMOVE_COMIXOLOGY_META = SETTINGS["remove_comixology_meta"]
LIBRARY_LOCATION = SETTINGS["library_location"]
KEEP_ORIGINAL = SETTINGS["keep_original"]
COMPARE_COVERS = SETTINGS["compare_covers"]
PDF_ZOOM = SETTINGS['from_pdf_zoom']
ARCHIVE_TYPE = SETTINGS['archive_type']
METADATA_TYPE = SETTINGS['metadata_type']
IMAGE_TYPE = SETTINGS['image_type']
RENAME_TEMPLATE = SETTINGS['rename_template']
COMIC_DATABASE = SETTINGS["comic_database"]
WRITE_METADATA = SETTINGS["write_metadata"]  # true, false, overwrite, merge_existing, merge_new


def screen_clear():
    """Clear the terminal (``clear`` on POSIX, ``cls`` on Windows)."""
    if os.name == 'posix':
        _ = os.system('clear')
    else:
        _ = os.system('cls')


def move_to_library(file, details):
    """Move *file* into LIBRARY_LOCATION, renamed via RENAME_TEMPLATE.

    ``details`` is a comicdb issue/series dict (publisher, volumeName,
    issueName, storeDate, volumeYear, issueNumber).
    """
    extension = os.path.splitext(file)[1]
    new_name = copy.copy(RENAME_TEMPLATE)
    seperator = os.path.sep
    publisher = details["publisher"]
    series = details["volumeName"]
    issue_name = details["issueName"]
    issue_year = details["storeDate"].split('-')[0]
    series_year = details["volumeYear"]
    # Fall back to the issue's year when the series year is unusable.
    if series_year == "UNKNOWN" or series_year == "PRESENT" or series_year == None or series_year == "":
        series_year = issue_year
    issue = details['issueNumber']
    new_name = new_name.format(seperator=seperator, publisher=publisher, series=series,
                               series_year=series_year, issue_year=issue_year,
                               issue_pad2=comicutil.pad_to_length(issue, 2),
                               issue_pad3=comicutil.pad_to_length(issue, 3),
                               issue_name=issue_name)
    new_name = os.path.join(LIBRARY_LOCATION, f"{new_name}{extension}")
    new_name = comicutil.stripBadChars(new_name)
    os.makedirs(os.path.dirname(new_name), exist_ok=True)
    shutil.move(file, new_name)
    # FIX: the file is moved, not copied — message said "copied".
    print(f"Comic moved to {new_name}")


def convert_to(oldfile, newfile, metadata=None, image_type=IMAGE_TYPE):
    """Re-pack ``oldfile`` as ``newfile``, optionally writing metadata.

    Extracts to a temp dir, applies the configured cleanups (promo removal,
    comixology meta removal, image conversion, subfolder flattening), writes
    metadata per WRITE_METADATA policy, then re-archives.

    Returns the result of archiveutil.dir_to_archive on success, False on
    any failure (the temp dir is removed on the failure paths).
    """
    try:
        tmp = ""
        try:
            tmp = archiveutil.extract_to_temp(oldfile, PDF_ZOOM)
        except Exception as e:
            print("Extract error: ", e)
            shutil.rmtree(tmp)
            return False
        if REMOVE_SCENE_PROMO:
            try:
                comicutil.remove_promos_from_dir(tmp)
            except Exception as e:
                print(f"Error removing promos: {e}")
                shutil.rmtree(tmp)
                return False
        if REMOVE_COMIXOLOGY_META:
            try:
                comicutil.remove_comixology_meta_from_dir(tmp)
            except Exception as e:
                # FIX: this branch previously reported "Error removing promos".
                print(f"Error removing comixology meta: {e}")
                shutil.rmtree(tmp)
                return False
        # FIX: was testing the global IMAGE_TYPE, which silently ignored the
        # image_type argument whenever the global was empty.
        if image_type != "" and image_type != None:
            imageutil.convert_dir_to_type(tmp, image_type)
        if REMOVE_SUBFOLDERS:
            archiveutil.remove_subfolders(tmp)
        if metadata is not None:
            try:
                meta = comicutil.comicdb_to_meta(metadata)
                metas = comicutil.get_meta_from_dir(tmp)
                # NOTE(review): loop variable m is unused — the same write is
                # repeated once per entry of comicutil.metadata_files; kept
                # as-is to preserve behavior, but looks like it should use m.
                for m in comicutil.metadata_files:
                    if metas[METADATA_TYPE] == "" and WRITE_METADATA:
                        comicutil.write_meta_to_dir(meta, tmp, METADATA_TYPE)
                    elif metas[METADATA_TYPE] != "" and WRITE_METADATA == "overwrite":
                        comicutil.write_meta_to_dir(meta, tmp, METADATA_TYPE)
                    elif metas[METADATA_TYPE] != "" and WRITE_METADATA:
                        # merge_new / merge_existing: merge old and new XML.
                        if METADATA_TYPE == "ComicInfo.xml":
                            xml1 = comicinfoxml.ComicInfoXml().stringFromMetadata(metas[METADATA_TYPE])
                            xml2 = comicinfoxml.ComicInfoXml().stringFromMetadata(meta)
                            xml3 = ""
                            if WRITE_METADATA == "merge_new":
                                xml3 = comicutil.merge_meta_xml(xml1, xml2, "xml1")
                            if WRITE_METADATA == "merge_existing":
                                xml3 = comicutil.merge_meta_xml(xml1, xml2, "xml2")
                            new_meta = comicinfoxml.ComicInfoXml().metadataFromString(xml3)
                            comicutil.write_meta_to_dir(new_meta, tmp, METADATA_TYPE)
            except Exception as e:
                print(f"Failed to write metadata to directory: {repr(e)}")
        try:
            return archiveutil.dir_to_archive(tmp, newfile, metadata)
        except Exception as e:
            print("Archive error: ", e)
            shutil.rmtree(tmp)
            return False
    except Exception as e:
        print(f"Convert error: {repr(e)}")
        return False


def file_or_folder():
    """Prompt for file-vs-folder mode and open the matching picker.

    Returns the selected path, or None when the prompt is aborted.
    """
    print("--------------------------")
    print("File or Folder? Default: 1")
    print("--------------------------")
    print("1: File")
    print("2: Folder")
    print("--------------------------")
    val = input("") or "1"
    if val == "":
        return
    if val == "1":
        Tk().withdraw()
        return filedialog.askopenfilename(initialdir = "/",title = "Select file")
    if val == "2":
        Tk().withdraw()
        return filedialog.askdirectory(initialdir="/",title='Select directory')


def convert_to_archive_type():
    """Interactively convert one archive (or a folder of them) to a new archive format."""
    print("--------------------------")
    print("Convert to New Format")
    print("Default 1")
    print("--------------------------")
    print(f"1: Convert to Prefered Type: {ARCHIVE_TYPE.upper()}")
    print("2: Convert to CBZ")
    print("3: Convert to CBR")
    print("4: Convert to PDF")
    print("5: Convert to CB7")
    print("6: Convert to CBA")
    print("7: Convert to CBT")
    print("8: Convert to EPUB")
    print("--------------------------")
    val = input("") or "1"
    selected = file_or_folder()
    if selected == "" or selected == None:
        return
    file_types = {"1": ARCHIVE_TYPE.lower(), "2": "cbz", "3": "cbr", "4": "pdf",
                  "5": "cb7", "6": "cba", "7": "cbt", "8": "epub"}
    if os.path.isdir(selected):
        for subdir, dirs, files in os.walk(selected):
            for file in files:
                # NOTE(review): is_archive is given the bare filename here
                # (extension check), unlike the full path used elsewhere.
                if archiveutil.is_archive(file):
                    old_file = subdir + os.sep + file
                    base_file = os.path.splitext(old_file)[0]
                    new_file = f"{base_file}.{file_types[val]}"
                    converted = convert_to(old_file, new_file)
                    # FIX: typo "coverted".
                    print(f"file converted: {converted}")
                    if not KEEP_ORIGINAL and converted and old_file != new_file:
                        os.remove(old_file)
    else:
        if archiveutil.is_archive(selected):
            old_file = selected
            base_file = os.path.splitext(old_file)[0]
            new_file = f"{base_file}.{file_types[val]}"
            converted = convert_to(old_file, new_file)
            if not KEEP_ORIGINAL and converted and old_file != new_file:
                os.remove(old_file)


def convert_to_image_type():
    """Interactively convert the images inside an archive (or folder of archives)."""
    print("--------------------------")
    print("Convert to New Format")
    print("Default 1")
    print("--------------------------")
    print(f"1: Convert to Prefered Type: {IMAGE_TYPE.upper()}")
    print("2: Convert to JPG")
    print("3: Convert to PNG")
    print("4: Convert to WEBP")
    print("5: Convert to BMP")
    print("6: Convert to GIF")
    print("7: Convert to TIFF")
    print("--------------------------")
    val = input("") or "1"
    selected = file_or_folder()
    if selected == "" or selected == None:
        return
    image_types = {"1": IMAGE_TYPE.lower(), "2": "jpg", "3": "png", "4": "webp",
                   "5": "bmp", "6": "gif", "7": "tiff"}
    if os.path.isdir(selected):
        for subdir, dirs, files in os.walk(selected):
            for file in files:
                if archiveutil.is_archive(file):
                    file = subdir + os.sep + file
                    # FIX: convert_to has no "comicdb_info" parameter — the old
                    # keyword raised TypeError on every call. It is "metadata".
                    converted = convert_to(file, file, metadata=None, image_type=image_types[val])
                    print(f"Image files in {converted} converted to {image_types[val]}")
    else:
        if archiveutil.is_archive(selected):
            file = selected
            # FIX: same wrong keyword as above.
            converted = convert_to(file, file, metadata=None, image_type=image_types[val])
            print(f"Image files in {converted} converted to {image_types[val]}")


def remove_scene_promos():
    """Strip scene-release promo pages from the selected archive(s)."""
    print("--------------------------")
    print("Remove Scene Promos")
    print("")
    selected = file_or_folder()
    if selected == "" or selected == None:
        return
    if os.path.isdir(selected):
        for subdir, dirs, files in os.walk(selected):
            for file in files:
                filepath = subdir + os.sep + file
                if archiveutil.is_archive(filepath):
                    comicutil.remove_promos_from_file(filepath, PDF_ZOOM)
    else:
        if archiveutil.is_archive(selected):
            comicutil.remove_promos_from_file(selected, PDF_ZOOM)


def set_database():
    """Choose which comic database backend comicdb uses."""
    print("--------------------------")
    print("Set Comic Database")
    print("Default 1")
    print("--------------------------")
    print("1: Comicvine")
    print("2: League of Comic Geeks")
    print("3: Comixology")
    print("4: Back to Menu")
    print("--------------------------")
    val = input("") or "1"
    if val == "1":
        comicdb.set_database("comicvine")
        print("Comic Database set to Comicvine")
    if val == "2":
        comicdb.set_database("locg")
        print("Comic Database set to League of Comic Geeks")
    if val == "3":
        comicdb.set_database("comixology")
        print("Comic Database set to Comixology")
    if val == "4" or val.lower() == "q" or val.lower() == "quit" or val.lower() == "back":
        # FIX: "Back to Menu" used to silently reset the database to
        # comicvine; backing out must leave the current choice untouched.
        return


def tag_interactive(filename, results=None, issues=None):
    """Interactively match ``filename`` against database search results.

    ``results``/``issues`` carry cached state between calls so a run over a
    folder of the same series only prompts once.

    Returns (results, issues, status) where status is "" normally and
    "quit" when the user aborted.
    """
    last_series = None
    results = results
    issues = issues
    originalfilename = filename
    details = comicutil.get_comic_details(filename)
    query = f"{details.series}"
    # FIX: pre-initialize so the post-loop checks can't hit NameError when no
    # search result passes the year filter.
    val = None
    extracted = ""
    if details.year == None or details.year == "":
        details.year = "0000"
    if results != None:
        last_series = results["last_series"]
        results = results["results"]
    if results == None:
        results = comicdb.search_series(query, 50, 70)
    print("-----------------------------------------------------------------")
    print(f"File Name: {originalfilename}")
    stop = False
    year_range = False
    for result in results:
        if not stop:
            # Only offer series whose publication range can contain this issue.
            if result["end_year"] == "PRESENT" or result["end_year"] == "UNKNOWN":
                year_range = True
            elif int(details.year) <= int(result["end_year"]):
                year_range = True
            if year_range:
                val = None
                if last_series != details.series:
                    print(f'---------------------- Search Result -----------------------------')
                    print(f'Series Name: {result["name"]}')
                    print(f'Year: {result["year"]}')
                    print(f'Publisher: {result["publisher"]}')
                    print(f'Issues: {result["issue_count"]}')
                    print(f'Series Link: {result["link"]}')
                    print("-----------------------------------------------------------------")
                    print(f'Name Match Confidence: {result["confidence"]}')
                    print("-----------------------------------------------------------------")
                    val = input("Is this the right series? (y/n/q) default (y): ") or "y"
                else:
                    # Same series as the previous file: auto-accept.
                    val = "y"
                if val.lower() == "y" or val.lower() == "yes":
                    val = "y"
                elif val.lower() == "n" or val.lower() == "no":
                    val = "n"
                elif val.lower() == "q" or val.lower() == "quit" or val.lower() == "exit":
                    val = "q"
                if val == "y":
                    if issues == None:
                        issues = comicdb.get_series_by_id(result["id"])
                    for issue in issues:
                        if issue['issueNumber'] == details.issue or details.issue == "":
                            if "storeDate" not in issue:
                                issue.update(comicdb.get_issue_by_id(issue["id"]))
                            extracted = ""
                            if COMPARE_COVERS:
                                extracted = archiveutil.extract_to_temp(filename)
                            if True:
                                print("-----------------------------------------------------------------")
                                print(f"File Name: {originalfilename}")
                                print(f'---------------------- Search Result -----------------------------')
                                print(f'Issue Name: {issue["issueName"]}')
                                print(f'Store Date: {issue["storeDate"]}')
                                print(f'Issue Link: {issue["issueLink"]}')
                                print(f'Series Name: {result["name"]}')
                                print(f'Series Year: {result["year"]}')
                                print(f'Publisher: {result["publisher"]}')
                                if COMPARE_COVERS:
                                    webcover = imageutil.getImageFromUrl(issue['coverImage'])
                                    cbcover = comicutil.get_cover_from_dir(extracted)
                                    cover_confidence = imageutil.compare_images2(webcover, cbcover)
                                    print(f'Cover Match Confidence: {cover_confidence}')
                                print("-----------------------------------------------------------------")
                                val = input("Rename with these details? (y/n/q) default (y): ") or "y"
                                if val.lower() == "y" or val.lower() == "yes":
                                    val = "y"
                                elif val.lower() == "n" or val.lower() == "no":
                                    val = "n"
                                elif val.lower() == "q" or val.lower() == "quit" or val.lower() == "exit":
                                    val = "q"
                                if val == "y":
                                    if "description" not in issue:
                                        issue.update(comicdb.get_issue_by_id(issue["id"]))
                                    file_folder = os.path.dirname(filename)
                                    issue_name = comicutil.stripBadChars(comicutil.remove_issue_number(issue["issueName"]))
                                    issue_year = issue["storeDate"].split("-")[0]
                                    # Archive is rebuilt under a temp name, then
                                    # renamed properly by move_to_library().
                                    new_filename = f"temp.{ARCHIVE_TYPE.lower()}"
                                    new_file = os.path.join(file_folder, new_filename)
                                    meta_details = copy.deepcopy(result)
                                    meta_details.update(issue)
                                    converted = convert_to(filename, new_file, meta_details)
                                    if not KEEP_ORIGINAL and converted and filename != new_file:
                                        os.remove(filename)
                                    move_to_library(new_file, meta_details)
                                    result["last_series"] = details.series
                                    results = {"results": results, "last_series": details.series}
                                    return results, issues, ""
                                if val == "n":
                                    pass
                                if val == "q":
                                    if os.path.isdir(extracted):
                                        shutil.rmtree(extracted)
                                    stop = True
                                    break
    if val == "q":
        return results, issues, "quit"
    if val == "n":
        pass
    try:
        if os.path.isdir(extracted):
            shutil.rmtree(extracted)
    except Exception:
        pass
    results = {"results": results, "last_series": details.series}
    return results, issues, ""


def search_and_tag_interactive():
    """Tag a file, or walk a folder tagging every archive, reusing series state."""
    print("--------------------------")
    print("Get Details and Organize")
    print("")
    selected = file_or_folder()
    if selected == "" or selected == None:
        return
    if os.path.isdir(selected):
        results = None
        issues = None
        for subdir, dirs, files in os.walk(selected):
            for file in files:
                filepath = subdir + os.sep + file
                if archiveutil.is_archive(filepath):
                    details = comicutil.get_comic_details(filepath)
                    # Reset cached search state when the series changes.
                    if results is not None:
                        if results["last_series"] != details.series:
                            results = None
                            issues = None
                    r, i, q = tag_interactive(filepath, results, issues)
                    results = r
                    issues = i
                    if q == "quit":
                        break
    else:
        if archiveutil.is_archive(selected):
            tag_interactive(selected)


def main_menu():
    """Show the top-level menu and dispatch one action."""
    print("============================================================================")
    print("What would like to do? default: 1")
    print("")
    print("1: Tag and organize.")
    print("2: Convert archive type.")
    print("3: Convert image type.")
    print("4: Remove scene promos.")
    print("5: Change Comic Database.")
    print("6: List Supported Archive Types.")
    print("7: Quit.")
    print("============================================================================")
    val = input("") or "1"
    if val == "1":
        search_and_tag_interactive()
    if val == "2":
        convert_to_archive_type()
    if val == "3":
        convert_to_image_type()
    if val == "4":
        remove_scene_promos()
    if val == "5":
        set_database()
    if val == "6":
        archiveutil.list_supported_formats()
    if val == "7" or val.lower() == "q" or val.lower() == "quit" or val.lower() == "exit":
        sys.exit()


if __name__ == "__main__":
    while True:
        main_menu()
# Couchbase testrunner REST client: RestHelper wraps a RestConnection with
# polling/wait helpers; RestConnection talks to ns_server's HTTP API.
import base64
import json
import urllib.request, urllib.parse, urllib.error
from urllib3._collections import HTTPHeaderDict
from . import httplib2
import logger
import traceback
import socket
import time
import re
import uuid
from copy import deepcopy
from threading import Thread
from TestInput import TestInputSingleton
from TestInput import TestInputServer
from testconstants import MIN_KV_QUOTA, INDEX_QUOTA, FTS_QUOTA, CBAS_QUOTA
from testconstants import COUCHBASE_FROM_VERSION_4, IS_CONTAINER, CLUSTER_QUOTA_RATIO
from lib.Cb_constants.CBServer import CbServer
try:
    from couchbase_helper.document import DesignDocument, View
except ImportError:
    from lib.couchbase_helper.document import DesignDocument, View
from memcached.helper.kvstore import KVStore
from .exception import ServerAlreadyJoinedException, ServerUnavailableException, InvalidArgumentException
from membase.api.exception import BucketCreationException, ServerSelfJoinException, ClusterRemoteException, \
    RebalanceFailedException, FailoverFailedException, DesignDocCreationException, QueryViewException, \
    ReadDocumentException, GetBucketInfoFailed, CompactViewFailed, SetViewInfoNotFound, AddNodeException, \
    BucketFlushFailed, CBRecoveryFailedException, XDCRException, SetRecoveryTypeFailed, BucketCompactionException

log = logger.Logger.get_logger()


# helper library methods built on top of RestConnection interface
class RestHelper(object):
    def __init__(self, rest_connection):
        """Wrap an existing RestConnection in polling/wait helpers."""
        self.rest = rest_connection

    def is_ns_server_running(self, timeout_in_seconds=360):
        """Poll nodes/self every 5s until the node reports 'healthy'.

        Returns True on healthy, False once the timeout elapses.
        """
        log.info("-->is_ns_server_running?")
        end_time = time.time() + timeout_in_seconds
        while time.time() <= end_time:
            try:
                status = self.rest.get_nodes_self(5)
                if status is not None and status.status == 'healthy':
                    return True
                else:
                    if status is not None:
                        log.warn("server {0}:{1} status is {2}"\
                            .format(self.rest.ip, self.rest.port, status.status))
                    else:
                        log.warn("server {0}:{1} status is down"\
                            .format(self.rest.ip, self.rest.port))
            except ServerUnavailableException:
                # Node not reachable yet — keep polling until the deadline.
                log.error("server {0}:{1} is unavailable"\
                    .format(self.rest.ip, self.rest.port))
            time.sleep(5)
        msg = 'unable to connect to the node {0} even after waiting {1} seconds'
        log.error(msg.format(self.rest.ip, timeout_in_seconds))
        return False

    def is_cluster_healthy(self, timeout=120):
        # get the nodes and verify that all the nodes.status are healthy
        nodes = self.rest.node_statuses(timeout)
        return all(node.status == 'healthy' for node in nodes)

    def rebalance_reached(self, percentage=100, retry_count=40):
        """Poll rebalance progress until it reaches *percentage*.

        Progress codes: -1 = error, -100 = progress unavailable. A stalled
        progress value accumulates half a retry per poll so a stuck
        rebalance eventually fails instead of hanging forever.
        """
        start = time.time()
        progress = 0
        previous_progress = 0
        retry = 0
        while progress != -1 and progress < percentage and retry < retry_count:
            # -1 is error , -100 means could not retrieve progress
            progress = self.rest._rebalance_progress()
            if progress == -100:
                log.error("unable to retrieve rebalanceProgress.try again in 2 seconds")
                retry += 1
            else:
                if previous_progress == progress:
                    retry += 0.5
                else:
                    retry = 0
                    previous_progress = progress
            # poll interval (NOTE(review): original comment said 2 seconds,
            # but the sleep below is 3)
            time.sleep(3)
        if progress <= 0:
            log.error("rebalance progress code : {0}".format(progress))
            return False
        elif retry >= retry_count:
            log.error("rebalance stuck on {0}%".format(progress))
            return False
        else:
            duration = time.time() - start
            log.info('rebalance reached >{0}% in {1} seconds '.format(progress, duration))
            return True

    # return true if cluster balanced, false if it needs rebalance
    def is_cluster_rebalanced(self):
        """Ask ns_orchestrator via diag/eval; returns True/False, or None on error."""
        command = "ns_orchestrator:needs_rebalance()"
        status, content = self.rest.diag_eval(command)
        if status:
            return content.lower() == "false"
        log.error("can't define if cluster balanced")
        return None

    # this method will rebalance the cluster by passing the remote_node as
    # ejected node
    def remove_nodes(self, knownNodes, ejectedNodes, wait_for_rebalance=True):
        """Start a rebalance that ejects *ejectedNodes*.

        NOTE(review): returns False both when ejectedNodes is empty and when
        wait_for_rebalance is False, so callers cannot distinguish
        "nothing to do" from "started but not waited on".
        """
        if len(ejectedNodes) == 0:
            return False
        self.rest.rebalance(knownNodes, ejectedNodes)
        if wait_for_rebalance:
            return self.rest.monitorRebalance()
        else:
            return False

    def vbucket_map_ready(self, bucket, timeout_in_seconds=360):
        """Poll every 0.5s until the bucket reports a non-empty vbucket map."""
        end_time = time.time() + timeout_in_seconds
        while time.time() <= end_time:
            vBuckets = self.rest.get_vbuckets(bucket)
            if vBuckets:
                return True
            else:
                time.sleep(0.5)
        msg = 'vbucket map is not ready for bucket {0} after waiting {1} seconds'
        log.info(msg.format(bucket, timeout_in_seconds))
        return False

    def bucket_exists(self, bucket):
        """True if a bucket with this name exists; any REST error yields False."""
        try:
            buckets = self.rest.get_buckets()
            names = [item.name for item in buckets]
            log.info("node {1} existing buckets : {0}" \
                .format(names, self.rest.ip))
            for item in buckets:
                if item.name == bucket:
                    log.info("node {1} found bucket {0}" \
                        .format(bucket, self.rest.ip))
                    return True
            return False
        except Exception:
            return False

    def wait_for_node_status(self, node, expected_status, timeout_in_seconds):
        """Poll node_statuses() every 5s until *node* reports *expected_status*."""
        status_reached = False
        end_time = time.time() + timeout_in_seconds
        while time.time() <= end_time and not status_reached:
            nodes = self.rest.node_statuses()
            for n in nodes:
                if node.id == n.id:
                    log.info('node {0} status : {1}'.format(node.id, n.status))
                    if n.status.lower() == expected_status.lower():
                        status_reached = True
                    break
            if not status_reached:
                log.info("sleep for 5 seconds before reading the node.status again")
                time.sleep(5)
        log.info('node {0} status_reached : {1}'.format(node.id, status_reached))
        return status_reached

    def _wait_for_task_pid(self, pid, end_time, ddoc_name):
        """Block (polling every 5s) while the indexer task keeps the same pid."""
        while (time.time() < end_time):
            new_pid, _ = self.rest._get_indexer_task_pid(ddoc_name)
            if pid == new_pid:
                time.sleep(5)
                continue
            else:
                return

    def _wait_for_indexer_ddoc(self, servers, ddoc_name, timeout=300):
        """Wait for indexing of *ddoc_name* to finish on each matching server."""
        nodes = self.rest.get_nodes()
        servers_to_check = []
        # Only check servers that are actually part of the cluster.
        for node in nodes:
            for server in servers:
                if node.ip == server.ip and str(node.port) == str(server.port):
                    servers_to_check.append(server)
        for server in servers_to_check:
            try:
                rest = RestConnection(server)
                log.info('Check index for ddoc %s , server %s' % (ddoc_name, server.ip))
                end_time = time.time() + timeout
                log.info('Start getting index for ddoc %s , server %s' % (ddoc_name, server.ip))
                old_pid, is_pid_blocked = rest._get_indexer_task_pid(ddoc_name)
                if not old_pid:
                    log.info('Index for ddoc %s is not going on, server %s' % (ddoc_name, server.ip))
                    continue
                while is_pid_blocked:
                    log.info('Index for ddoc %s is blocked, server %s' % (ddoc_name, server.ip))
                    self._wait_for_task_pid(old_pid, end_time, ddoc_name)
                    old_pid, is_pid_blocked = rest._get_indexer_task_pid(ddoc_name)
                    if time.time() > end_time:
                        # NOTE(review): format string is malformed ("% pid %"
                        # has bare % placeholders) — this log call will raise
                        # ValueError if ever reached.
                        log.error("INDEX IS STILL BLOKED node %s ddoc % pid %" % (server, ddoc_name, old_pid))
                        break
                if old_pid:
                    log.info('Index for ddoc %s is running, server %s' % (ddoc_name, server.ip))
                    self._wait_for_task_pid(old_pid, end_time, ddoc_name)
            except Exception as ex:
                log.error('unable to check index on server %s because of %s' % (server.ip, str(ex)))

    def _get_vbuckets(self, servers, bucket_name='default'):
        """Map each server to its active/replica vbucket id lists for a bucket.

        Returns {server: {'active_vb': [...], 'replica_vb': [...]}}; returns
        early with a partial dict when a server reports no buckets.
        """
        vbuckets_servers = {}
        for server in servers:
            buckets = RestConnection(server).get_buckets()
            if not buckets:
                return vbuckets_servers
            if bucket_name:
                bucket_to_check = [bucket for bucket in buckets
                                   if bucket.name == bucket_name][0]
            else:
                bucket_to_check = [bucket for bucket in buckets][0]
            vbuckets_servers[server] = {}
            vbs_active = [vb.id for vb in bucket_to_check.vbuckets
                          if vb.master.startswith(str(server.ip))]
            vbs_replica = []
            for replica_num in range(0, bucket_to_check.numReplicas):
                vbs_replica.extend([vb.id for vb in bucket_to_check.vbuckets
                                    if replica_num in vb.replica
                                    and vb.replica[replica_num].startswith(str(server.ip))])
            vbuckets_servers[server]['active_vb'] = vbs_active
            vbuckets_servers[server]['replica_vb'] = vbs_replica
        return vbuckets_servers


class RestConnection(object):

    def __new__(cls, serverInfo={}):
        # allow port to determine
        # behavior of restconnection
        port = None
        if isinstance(serverInfo, dict):
            if 'port' in serverInfo:
                port = serverInfo['port']
        else:
            port = serverInfo.port
        if not port:
            port = CbServer.port
        if CbServer.use_https:
            port = CbServer.ssl_port
        if int(port) in range(9091, 9100):
            # return elastic search rest connection
            # NOTE(review): super(EsRestConnection, cls) with cls being
            # RestConnection looks wrong — cls is not a subclass of
            # EsRestConnection, so this branch would raise TypeError.
            from membase.api.esrest_client import EsRestConnection
            obj = super(EsRestConnection, cls).__new__(cls)
        else:
            # default
            obj = object.__new__(cls)
        return obj

    def __init__(self, serverInfo):
        """Build per-service base URLs and verify the node answers nodes/self.

        serverInfo can be a json object/dictionary or a TestInputServer-like
        object; raises ServerUnavailableException when the node stays broken.
        """
        # serverInfo can be a json object/dictionary
        if isinstance(serverInfo, dict):
            self.ip = serverInfo["ip"]
            self.username = serverInfo["username"]
            self.password = serverInfo["password"]
            self.port = serverInfo["port"]
            self.index_port = CbServer.index_port
            self.fts_port = CbServer.fts_port
            self.query_port = CbServer.n1ql_port
            self.eventing_port = CbServer.eventing_port
            self.capi_port = CbServer.capi_port
            if "index_port" in list(serverInfo.keys()):
                self.index_port = serverInfo["index_port"]
            if "fts_port" in list(serverInfo.keys()):
                if serverInfo['fts_port']:
                    self.fts_port = serverInfo["fts_port"]
            if "eventing_port" in list(serverInfo.keys()):
                if serverInfo['eventing_port']:
                    self.eventing_port = serverInfo["eventing_port"]
            self.hostname = ''
            self.services = ''
            if "hostname" in serverInfo:
                self.hostname = serverInfo["hostname"]
            if "services" in serverInfo:
                self.services = serverInfo["services"]
        else:
            # Attribute-style server object (TestInputServer).
            self.ip = serverInfo.ip
            self.username = serverInfo.rest_username
            self.password = serverInfo.rest_password
            self.port = serverInfo.port
            self.hostname = ''
            self.index_port = CbServer.index_port
            self.fts_port = CbServer.fts_port
            self.query_port = CbServer.n1ql_port
            self.eventing_port = CbServer.eventing_port
            self.capi_port = CbServer.capi_port
            self.services = "kv"
            self.debug_logs = False
            if hasattr(serverInfo, "services"):
                self.services = serverInfo.services
            if hasattr(serverInfo, 'index_port'):
                self.index_port = serverInfo.index_port
            if hasattr(serverInfo, 'query_port'):
                self.query_port = serverInfo.query_port
            if hasattr(serverInfo, 'fts_port'):
                if serverInfo.fts_port:
                    self.fts_port = serverInfo.fts_port
            if hasattr(serverInfo, 'eventing_port'):
                if serverInfo.eventing_port:
                    self.eventing_port = serverInfo.eventing_port
            # Only honor a hostname that is not just the raw IP again.
            if hasattr(serverInfo, 'hostname') and serverInfo.hostname and\
               serverInfo.hostname.find(self.ip) == -1:
                self.hostname = serverInfo.hostname
            if hasattr(serverInfo, 'services'):
                self.services = serverInfo.services
        self.input = TestInputSingleton.input
        if self.input is not None:
            """ from watson, services param order and format:
                new_services=fts-kv-index-n1ql """
            self.services_node_init = self.input.param("new_services", None)
            self.debug_logs = self.input.param("debug-logs", False)
            self.eventing_role = self.input.param('eventing_role', False)
        # Remap every service port to its TLS equivalent when https is on.
        if CbServer.use_https:
            self.port = CbServer.ssl_port_map.get(str(self.port), str(self.port))
            self.index_port = CbServer.ssl_port_map.get(str(self.index_port), str(self.index_port))
            self.query_port = CbServer.ssl_port_map.get(str(self.query_port), str(self.query_port))
            self.fts_port = CbServer.ssl_port_map.get(str(self.fts_port), str(self.fts_port))
            self.eventing_port = CbServer.ssl_port_map.get(str(self.eventing_port), str(self.eventing_port))
            self.capi_port = CbServer.ssl_port_map.get(str(self.capi_port), str(self.capi_port))
        http_url = "http://%s:%s/"
        https_url = "https://%s:%s/"
        generic_url = http_url
        if CbServer.use_https:
            generic_url = https_url
        url_host = "%s" % self.ip
        if self.hostname:
            url_host = "%s" % self.hostname
        self.baseUrl = generic_url % (url_host, self.port)
        self.fts_baseUrl = generic_url % (url_host, self.fts_port)
        self.index_baseUrl = generic_url % (url_host, self.index_port)
        self.query_baseUrl = generic_url % (url_host, self.query_port)
        self.capiBaseUrl = generic_url % (url_host, self.capi_port)
        self.eventing_baseUrl = generic_url % (url_host, self.eventing_port)
        # Initialization of CBAS related params
        self.cbas_ip = self.ip
        self.cbas_port = CbServer.cbas_port
        if hasattr(self.input, 'cbas'):
            if self.input.cbas:
                self.cbas_node = self.input.cbas
                if hasattr(self.cbas_node, 'port'):
                    self.cbas_port = self.cbas_node.port
                if hasattr(self.cbas_node, 'ip'):
                    self.cbas_ip = self.cbas_node.ip
        if CbServer.use_https:
            self.cbas_port = CbServer.ssl_cbas_port
        self.cbas_base_url = generic_url % (self.cbas_ip, self.cbas_port)
        # Strip the trailing slash from the CBAS base url.
        self.cbas_base_url = self.cbas_base_url[:-1]

        # for Node is unknown to this cluster error
        for iteration in range(5):
            http_res, success = self.init_http_request(api=self.baseUrl + "nodes/self")
            if not success and isinstance(http_res, str) and\
               (http_res.find('Node is unknown to this cluster') > -1 or \
                http_res.find('Unexpected server error, request logged') > -1):
                log.error("Error {0} was gotten, 5 seconds sleep before retry"\
                    .format(http_res))
                time.sleep(5)
                # NOTE(review): gives up after the 3rd failure (iteration==2)
                # even though the loop is written for 5 attempts.
                if iteration == 2:
                    log.error("node {0}:{1} is in a broken state!"\
                        .format(self.ip, self.port))
                    raise ServerUnavailableException(self.ip)
                continue
            else:
                break
        # determine the real couchApiBase for cluster_run
        # couchApiBase appeared in version 2.*
        if isinstance(http_res, dict):
            if not http_res or http_res["version"][0:2] == "1.":
                self.capiBaseUrl = self.baseUrl + "/couchBase"
            else:
                for iteration in range(5):
                    if "couchApiBase" not in http_res.keys():
                        if self.is_cluster_mixed():
                            self.capiBaseUrl = self.baseUrl + "/couchBase"
                            return
                        time.sleep(0.2)
                        http_res, success = self.init_http_request(self.baseUrl + 'nodes/self')
                    else:
                        if CbServer.use_https:
                            self.capiBaseUrl = http_res["couchApiBaseHTTPS"]
                        else:
                            self.capiBaseUrl = http_res["couchApiBase"]
                        return
                raise ServerUnavailableException("couchApiBase doesn't exist in nodes/self: %s " % http_res)

    def sasl_streaming_rq(self, bucket, timeout=120,
                          disable_ssl_certificate_validation=True):
        """Issue one (blocking) GET against the bucket's streaming endpoint.

        Exceptions are only logged — the request is best-effort and is
        normally run from a daemon-style thread.
        """
        api = self.baseUrl + 'pools/default/bucketsStreaming/{0}'.format(bucket)
        if isinstance(bucket, Bucket):
            api = self.baseUrl + 'pools/default/bucketsStreaming/{0}'.format(bucket.name)
        try:
            httplib2.Http(timeout=timeout,
                          disable_ssl_certificate_validation=disable_ssl_certificate_validation).\
                request(api, 'GET', '', headers=self._create_capi_headers())
        except Exception as ex:
            log.warn('Exception while streaming: %s' % str(ex))

    def open_sasl_streaming_connection(self, bucket, timeout=1000):
        """Run sasl_streaming_rq in a background thread; returns the Thread or None."""
        if self.debug_logs:
            log.info("Opening sasl streaming connection for bucket {0}"\
                .format((bucket, bucket.name)[isinstance(bucket, Bucket)]))
        t = Thread(target=self.sasl_streaming_rq,
                   name="streaming_" + str(uuid.uuid4())[:4],
                   args=(bucket, timeout))
        try:
            t.start()
        except:
            log.warn("thread is not started")
            return None
        return t

    def is_cluster_mixed(self, timeout=120):
        """True when the cluster mixes 1.x and 2.x nodes (major-version check)."""
        http_res, success = self.init_http_request(self.baseUrl + 'pools/default', timeout=timeout)
        if http_res == 'unknown pool':
            return False
        try:
            versions = list({node["version"][:1] for node in http_res["nodes"]})
        except:
            log.error('Error while processing cluster info {0}'.format(http_res))
            # not really clear what to return but False see to be a good start until we figure what is happening
            return False
        if '1' in versions and '2' in versions:
            return True
        return False

    def is_cluster_compat_mode_greater_than(self, version):
        """
        curl -v -X POST -u Administrator:welcome http://10.3.4.186:8091/diag/eval
        -d 'cluster_compat_mode:get_compat_version().'
        Returns : [3,2] if version = 3.2.0
        """
        status, content = self.diag_eval('cluster_compat_mode:get_compat_version().')
        if status:
            json_parsed = json.loads(content)
            cluster_ver = float("%s.%s" % (json_parsed[0], json_parsed[1]))
            if cluster_ver > version:
                return True
        return False

    def is_enterprise_edition(self):
        """True iff every node in the cluster runs an enterprise build."""
        http_res, success = self.init_http_request(self.baseUrl + 'pools/default')
        if http_res == 'unknown pool':
            return False
        editions = []
        community_nodes = []
        """ get the last word in node["version"] as in "version": "2.5.1-1073-rel-enterprise" """
        for node in http_res["nodes"]:
            editions.extend(node["version"].split("-")[-1:])
            if "community" in node["version"].split("-")[-1:]:
                community_nodes.extend(node["hostname"].split(":")[:1])
        if "community" in editions:
            log.error("IP(s) for node(s) with community edition {0}".format(community_nodes))
            return False
        return True

    def init_http_request(self, api, timeout=120):
        """GET *api* and JSON-decode the body.

        Returns (parsed_json, True) on HTTP success, (parsed_json, False) on
        HTTP failure, and (raw_content_or_None, False) when the body is not
        valid JSON.
        """
        content = None
        try:
            headers = self._create_capi_headers()
            status, content, header = self._http_request(api, 'GET',
                                                         headers=headers,
                                                         timeout=timeout)
            json_parsed = json.loads(content)
            if status:
                return json_parsed, True
            else:
                print("{0} with status {1}: {2}".format(api, status, json_parsed))
                return json_parsed, False
        except ValueError as e:
            # Body was not JSON — return it raw so callers can inspect it.
            if content is not None:
                print("{0}: {1}".format(api, content))
            else:
                print(e)
            return content, False

    def rename_node(self, hostname, username='Administrator', password='password'):
        """POST node/controller/rename; returns (status, content)."""
        params = urllib.parse.urlencode({'username': username,
                                         'password': password,
                                         'hostname': hostname})
        api = "%snode/controller/rename" % self.baseUrl
        status, content, header = self._http_request(api, 'POST', params)
        return status, content

    def active_tasks(self):
        """Fetch pools/default/tasks; returns parsed JSON, or "" on decode error."""
        api = self.baseUrl + "pools/default/tasks"
        try:
            status, content, header = self._http_request(api, 'GET',
                                                         headers=self._create_capi_headers())
            json_parsed = json.loads(content)
        except ValueError as e:
            print(e)
            return ""
        return json_parsed

    def ns_server_tasks(self):
        """Fetch pools/default/tasks with up to 3 retries (10s apart); "" on failure."""
        api = self.baseUrl + 'pools/default/tasks'
        retries = 3
        while retries:
            try:
                status, content, header = self._http_request(api, 'GET',
                                                             headers=self._create_headers())
                return json.loads(content)
            except ValueError:
                time.sleep(10)
                retries -= 1
        return ""

    # DEPRECATED: use create_ddoc() instead.
    def create_view(self, design_doc_name, bucket_name, views, options=None):
        # DEPRECATED thin wrapper; forwards to create_ddoc().
        return self.create_ddoc(design_doc_name, bucket_name, views, options)

    def create_ddoc(self, design_doc_name, bucket, views, options=None):
        """Build a DesignDocument (escaping '/' in the name) and PUT it."""
        design_doc = DesignDocument(design_doc_name, views, options=options)
        if design_doc.name.find('/') != -1:
            design_doc.name = design_doc.name.replace('/', '%2f')
            design_doc.id = '_design/{0}'.format(design_doc.name)
        return self.create_design_document(bucket, design_doc)

    def create_design_document(self, bucket, design_doc):
        """PUT *design_doc* to the CAPI endpoint of *bucket*; return parsed body."""
        log.info("-->create_design_document")
        try:
            design_doc_name = design_doc.id
            api = '%s/%s/%s' % (self.capiBaseUrl, bucket, design_doc_name)
            if isinstance(bucket, Bucket):
                api = '%s/%s/%s' % (self.capiBaseUrl, bucket.name, design_doc_name)
            status, content, header = self._http_request(api, 'PUT', str(design_doc),
                                                         headers=self._create_capi_headers())
        except Exception as e:
            # NOTE(review): if _http_request raises, `status`/`content` below are
            # unbound and the next line raises NameError instead of the intended error.
            traceback.print_exc()
        if not status:
            raise DesignDocCreationException(design_doc_name, content)
        return json.loads(content.decode())

    def is_index_triggered(self, ddoc_name, index_type='main'):
        """True when an indexer task (running or blocked) exists for *ddoc_name*."""
        run, block = self._get_indexer_task_pid(ddoc_name, index_type=index_type)
        if run or block:
            return True
        else:
            return False

    def _get_indexer_task_pid(self, ddoc_name, index_type='main'):
        """Return (pid, blocked?) of the indexer task for *ddoc_name*, else (None, None)."""
        active_tasks = self.active_tasks()
        if 'error' in active_tasks:
            # NOTE(review): returns a bare None here, but callers unpack two values
            # (see is_index_triggered) — that path raises TypeError.
            return None
        if active_tasks:
            for task in active_tasks:
                if task['type'] == 'indexer' and task['indexer_type'] == index_type:
                    for ddoc in task['design_documents']:
                        if ddoc == ('_design/%s' % ddoc_name):
                            return task['pid'], False
                if task['type'] == 'blocked_indexer' and task['indexer_type'] == index_type:
                    for ddoc in task['design_documents']:
                        if ddoc == ('_design/%s' % ddoc_name):
                            return task['pid'], True
        return None, None

    def query_view(self, design_doc_name, view_name, bucket, query, timeout=120,
                   invalid_query=False, type="view"):
        """Run a view query; raise QueryViewException on failure unless
        *invalid_query* marks the failure as expected."""
        status, content, header = self._query(design_doc_name, view_name, bucket,
                                              type, query, timeout)
        if not status and not invalid_query:
            stat = 0
            if 'status' in header:
                stat = int(header['status'])
            raise QueryViewException(view_name, content, status=stat)
        return json.loads(content)

    def _query(self, design_doc_name, view_name, bucket, view_type, query, timeout):
        """Low-level CAPI view/spatial GET; returns (status, content, header)."""
        if design_doc_name.find('/') != -1:
            design_doc_name = design_doc_name.replace('/', '%2f')
        if view_name.find('/') != -1:
            view_name = view_name.replace('/', '%2f')
        api = self.capiBaseUrl + '%s/_design/%s/_%s/%s?%s' % (bucket, design_doc_name,
                                                              view_type, view_name,
                                                              urllib.parse.urlencode(query))
        if isinstance(bucket, Bucket):
            api = self.capiBaseUrl + '%s/_design/%s/_%s/%s?%s' % (bucket.name, design_doc_name,
                                                                  view_type, view_name,
                                                                  urllib.parse.urlencode(query))
        log.info("index query url: {0}".format(api))
        status, content, header = self._http_request(api, headers=self._create_capi_headers(),
                                                     timeout=timeout)
        return status, content, header

    def view_results(self, bucket, ddoc_name, params, limit=100, timeout=120,
                     view_name=None):
        # NOTE(review): local `json` shadows the json module within this method.
        status, json = self._index_results(bucket, "view", ddoc_name, params,
                                           limit, timeout=timeout, view_name=view_name)
        if not status:
            raise Exception("unable to obtain view results")
        return json

    # DEPRECATED: Incorrectly named function kept for backwards compatibility.
    def get_view(self, bucket, view):
        log.info("DEPRECATED function get_view(" + view + "). use get_ddoc()")
        return self.get_ddoc(bucket, view)

    def get_data_path(self):
        """Data directory path of this node (from node storage info)."""
        node_info = self.get_nodes_self()
        data_path = node_info.storage[0].get_data_path()
        return data_path

    def get_index_path(self):
        """Index directory path of this node (from node storage info)."""
        node_info = self.get_nodes_self()
        data_path = node_info.storage[0].get_index_path()
        return data_path

    def get_memcached_port(self):
        """Memcached port reported by this node."""
        node_info = self.get_nodes_self()
        return node_info.memcached

    def get_ddoc(self, bucket, ddoc_name):
        """Fetch a design document; raise ReadDocumentException when missing."""
        status, json, meta = self._get_design_doc(bucket, ddoc_name)
        if not status:
            raise ReadDocumentException(ddoc_name, json)
        return json, meta

    # the same as Preview a Random Document on UI
    def get_random_key(self, bucket):
        api = self.baseUrl + 'pools/default/buckets/%s/localRandomKey' % bucket
        status, content, header = self._http_request(api,
                                                     headers=self._create_capi_headers())
        json_parsed = json.loads(content)
        if not status:
            raise Exception("unable to get random document/key for bucket %s" % bucket)
        return json_parsed

    def create_scope(self, bucket, scope, params=None, num_retries=3):
        """Create a scope; 400 means it already exists, other failures retry."""
        api = self.baseUrl + 'pools/default/buckets/%s/scopes' % bucket
        body = {'name': scope}
        if params:
            body.update(params)
        params = urllib.parse.urlencode(body)
        headers = self._create_headers()
        while num_retries > 0:
            status, content, header = self._http_request(api, 'POST',
                                                         params=params, headers=headers)
            log.info("{0} with params: {1}".format(api, params))
            if status:
                json_parsed = json.loads(content)
                log.info("Scope created {}->{} {}".format(bucket, scope, json_parsed))
                break
            elif header["status"] == "400":
                log.info("Scope already exists. Skipping create {}->{}".format(bucket, scope))
                break
            else:
                time.sleep(10)
                num_retries -= 1
        else:
            # while/else: all retries exhausted without break.
            raise Exception("Create scope failed : status:{0},content:{1}".format(status, content))
        return status

    def _create_single_collection(self, bucket, scope, collection, params=None):
        """POST one collection under bucket/scope; returns raw (status, content, header)."""
        api = self.baseUrl + 'pools/default/buckets/%s/scopes/%s/collections' % (bucket, scope)
        body = {'name': collection}
        if params:
            body.update(params)
        params = urllib.parse.urlencode(body)
        headers = self._create_headers()
        status, content, header = self._http_request(api, 'POST',
                                                     params=params, headers=headers)
        log.info("{0} with params: {1}".format(api, params))
        return status,content,header

    def create_collection(self, bucket, scope, collection, params=None, num_retries=3):
        """Create one or many collections; 400 (exists) is treated as success."""
        if not isinstance(collection, list):
            collection = [collection]
        for c in collection:
            while num_retries > 0:
                status, content, header = self._create_single_collection(bucket, scope, c, params)
                if status:
                    json_parsed = json.loads(content)
                    log.info("Collection created {}->{}->{} manifest:{}".format(bucket, scope, c, json_parsed))
                    break
                elif header["status"] == "400":
                    log.info("Collection already exists. Skipping create {}->{}-{}".format(bucket, scope, c))
                    break
                else:
                    time.sleep(10)
                    num_retries -= 1
            else:
                raise Exception("Create collection failed : status:{0},content:{1}".format(status, content))
        return status

    def put_collection_scope_manifest(self, bucket, manifest, ensure_manifest=True):
        """ Put collection scope manifest to bulk update collection/scopes

        Args:
            ensure_manifest (bool): If set, blocks until the manifest has been
                applied to all nodes as the endpoint is asynchronous.
        """
        if isinstance(bucket, Bucket):
            bucket = bucket.name
        params, headers = json.dumps(manifest), self._create_capi_headers()
        status, content, _ = self._http_request(f"{self.baseUrl}pools/default/buckets/{bucket}/scopes",
                                                'PUT', params=params, headers=headers)
        if ensure_manifest:
            uid = json.loads(content)['uid']
            ensure_manifest_status, manifest_content, _ = self._http_request(
                f"{self.baseUrl}pools/default/buckets/{bucket}/scopes/@ensureManifest/{uid}",
                'POST', headers=headers)
        return status

    def get_bucket_manifest(self, bucket):
        """GET the scopes/collections manifest of *bucket* (dict) or raise."""
        if isinstance(bucket, Bucket):
            bucket = bucket.name
        api = '{0}{1}{2}{3}'.format(self.baseUrl, 'pools/default/buckets/', bucket, '/scopes')
        status, content, header = self._http_request(api)
        if status:
            return json.loads(content)
        else:
            raise Exception(
                "Cannot get manifest for bucket {}: status:{}, content:{}".format(bucket, status, content))

    def _parse_manifest(self, bucket, extract=None):
        """Extract scope names or collection names from the bucket manifest."""
        try:
            manifest = self.get_bucket_manifest(bucket)
            scopes = []
            collections = []
            for scope in manifest["scopes"]:
                scopes.append(scope["name"])
                for collection in scope["collections"]:
                    collections.append(collection["name"])
            if extract == "scopes":
                return scopes
            elif extract == "collections":
                return collections
        except Exception as e:
            # NOTE(review): e.message does not exist in Python 3 — this raises
            # AttributeError and masks the original error; use str(e).
            raise Exception("Cannot extract {} for bucket {} from manifest {}".format(extract, bucket, e.message))

    def get_bucket_scopes(self, bucket):
        return self._parse_manifest(bucket, "scopes")

    def get_bucket_collections(self, bucket):
        return self._parse_manifest(bucket, "collections")

    def get_scope_collections(self, bucket, scope):
        """List collection names inside *scope*; empty list when scope missing."""
        try:
            manifest = self.get_bucket_manifest(bucket)
            scope_found = False
            collections_in_scope = []
            for scopes in manifest["scopes"]:
                if scopes['name'] == scope:
                    scope_found = True
                    for collection in scopes['collections']:
                        collections_in_scope.append(collection['name'])
            if not scope_found:
                log.error("Cannot get collections for scope {} because it does not exist".format(scope))
            return collections_in_scope
        except Exception as e:
            # NOTE(review): e.message is a Python 2 idiom — use str(e).
            raise Exception("Cannot get collections for bucket {}-> scope{} {}".format(bucket, scope, e.message))

    def delete_scope(self, bucket, scope):
        """DELETE a scope; returns HTTP success flag."""
        api = self.baseUrl + 'pools/default/buckets/%s/scopes/%s' % (bucket, scope)
        headers = self._create_headers()
        status, content, header = self._http_request(api, 'DELETE', headers=headers)
        log.info("{0}".format(api))
        return status

    def get_rest_endpoint_data(self, endpoint=None, ip=None, port=None):
        """Generic GET against an arbitrary host:port/endpoint (https-aware)."""
        protocol = "http"
        if CbServer.use_https:
            port = CbServer.ssl_port_map.get(str(port), str(port))
            protocol = "https"
        endpoint_base_url = "{0}://{1}:{2}/".format(protocol, ip, port)
        api = str(endpoint_base_url) + str(endpoint)
        print(f'Executing GET on: {api}')
        headers = self._create_headers()
        status, content, header = self._http_request(api, 'GET', headers=headers)
        return status, content

    def delete_collection(self, bucket, scope, collection):
        """DELETE a single collection; returns HTTP success flag."""
        api = self.baseUrl + 'pools/default/buckets/%s/scopes/%s/collections/%s' % (bucket, scope, collection)
        headers = self._create_headers()
        status, content, header = self._http_request(api, 'DELETE', headers=headers)
        return status

    def get_collection(self, bucket):
        """GET the raw scopes/collections payload of *bucket*."""
        api = self.baseUrl + 'pools/default/buckets/%s/scopes' % bucket
        headers = self._create_headers()
        status, content, header = self._http_request(api, 'GET', headers=headers)
        return status, content

    def get_collection_uid(self, bucket, scope, collection):
        """Return the uid of bucket/scope/collection, or None (logged) if absent."""
        try:
            manifest = self.get_bucket_manifest(bucket)
            for scopes in manifest["scopes"]:
                if scopes['name'] == scope:
                    for col in scopes['collections']:
                        if col['name'] == collection:
                            return col['uid']
            log.error("Cannot get collection uid because {0}.{1}.{2} does not exist"
                      .format(bucket, scope, collection))
        except Exception as e:
            # NOTE(review): e.message is a Python 2 idiom — use str(e).
            raise Exception("Exception thrown while getting collection uid {}"
                            .format(e.message))

    def run_view(self, bucket, view, name):
        """GET a view and return the parsed result; raise on HTTP failure."""
        api = self.capiBaseUrl + '/%s/_design/%s/_view/%s' % (bucket, view, name)
        status, content, header = self._http_request(api,
            num_params += 1
            # JSON-encode key-like params and booleans; pass others through raw.
            if param in ["key", "startkey", "endkey", "start_range",
                         "end_range"] or isinstance(params[param], bool):
                api += "{0}={1}".format(param,
                                        json.dumps(params[param],
                                                   separators=(',', ':')))
            else:
                api += "{0}={1}".format(param, params[param])
        log.info("index query url: {0}".format(api))
        status, content, header = self._http_request(api,
                                                     headers=self._create_capi_headers(),
                                                     timeout=timeout)
        json_parsed = json.loads(content)
        return status, json_parsed

    def get_couch_doc(self, doc_id, bucket="default", timeout=120):
        """ use couchBase uri to retrieve document from a bucket """
        api = self.capiBaseUrl + '/%s/%s' % (bucket, doc_id)
        status, content, header = self._http_request(api,
                                                     headers=self._create_capi_headers(),
                                                     timeout=timeout)
        if not status:
            raise ReadDocumentException(doc_id, content)
        return json.loads(content)

    def _create_design_doc(self, bucket, name, function):
        """PUT a raw design-doc body (*function*); return (status, parsed_json)."""
        api = self.capiBaseUrl + '/%s/_design/%s' % (bucket, name)
        status, content, header = self._http_request(
            api, 'PUT', function, headers=self._create_capi_headers())
        json_parsed = json.loads(content)
        return status, json_parsed

    def _get_design_doc(self, bucket, name):
        """GET a design doc; return (status, body, meta) where meta comes from
        the X-Couchbase-Meta header or, failing that, from the body itself."""
        api = self.capiBaseUrl + '/%s/_design/%s' % (bucket, name)
        if isinstance(bucket, Bucket):
            api = self.capiBaseUrl + '/%s/_design/%s' % (bucket.name, name)
        status, content, header = self._http_request(api,
                                                     headers=self._create_capi_headers())
        json_parsed = json.loads(content.decode())
        meta_parsed = ""
        if status:
            # in dp4 builds meta data is in content, not in header
            if 'X-Couchbase-Meta' in header:
                meta = header['X-Couchbase-Meta']
                meta_parsed = json.loads(meta)
            elif 'x-couchbase-meta' in header:
                meta = header['x-couchbase-meta']
                meta_parsed = json.loads(meta)
            else:
                meta_parsed = {}
                try:
                    meta_parsed["_rev"] = json_parsed["_rev"]
                    meta_parsed["_id"] = json_parsed["_id"]
                except KeyError:
                    pass
        return status, json_parsed, meta_parsed

    def _delete_design_doc(self, bucket, name):
        """DELETE a design doc (verifying it exists first)."""
        status, design_doc, meta = self._get_design_doc(bucket, name)
        if not status:
            raise Exception("unable to find for deletion design document")
        api = self.capiBaseUrl + '/%s/_design/%s' % (bucket, name)
        if isinstance(bucket, Bucket):
            api = self.capiBaseUrl + \
                '/%s/_design/%s' % (bucket.name, name)
        status, content, header = self._http_request(api, 'DELETE',
                                                     headers=self._create_capi_headers())
        json_parsed = json.loads(content)
        return status, json_parsed

    def spatial_compaction(self, bucket, design_name):
        """POST a spatial-view compaction request."""
        api = self.capiBaseUrl + '/%s/_design/%s/_spatial/_compact' % (bucket, design_name)
        if isinstance(bucket, Bucket):
            api = self.capiBaseUrl + \
                '/%s/_design/%s/_spatial/_compact' % (bucket.name, design_name)
        status, content, header = self._http_request(api, 'POST',
                                                     headers=self._create_capi_headers())
        json_parsed = json.loads(content)
        return status, json_parsed

    # Make a _design/_info request
    def set_view_info(self, bucket, design_name):
        """Get view diagnostic info (node specific)"""
        api = self.capiBaseUrl
        if isinstance(bucket, Bucket):
            api += '/_set_view/{0}/_design/{1}/_info'.format(bucket.name, design_name)
        else:
            api += '_set_view/{0}/_design/{1}/_info'.format(bucket, design_name)
        status, content, header = self._http_request(api, 'GET',
                                                     headers=self._create_capi_headers())
        if not status:
            raise SetViewInfoNotFound(design_name, content)
        json_parsed = json.loads(content)
        return status, json_parsed

    # Make a _spatial/_info request
    def spatial_info(self, bucket, design_name):
        api = self.capiBaseUrl + \
            '/%s/_design/%s/_spatial/_info' % (bucket, design_name)
        status, content, header = self._http_request(
            api, 'GET', headers=self._create_capi_headers())
        json_parsed = json.loads(content)
        return status, json_parsed

    def _create_capi_headers(self):
        # JSON headers with Basic auth from this connection's credentials.
        authorization = self.get_authorization(self.username, self.password)
        return {'Content-Type': 'application/json',
                'Authorization': 'Basic %s' % authorization,
                'Accept': '*/*'}

    def _create_capi_headers_with_auth(self, username, password):
        # Same as _create_capi_headers but with explicit credentials.
        authorization = self.get_authorization(username, password)
        return {'Content-Type': 'application/json',
                'Authorization': 'Basic %s' % authorization,
                'Accept': '*/*'}

    def _create_headers_with_auth(self, username, password):
        # Minimal auth-only header set.
        authorization = self.get_authorization(username, password)
        return {'Authorization': 'Basic %s' % authorization}

    # authorization must be a base64 string of username:password
    def _create_headers(self):
        authorization = self.get_authorization(self.username, self.password)
        return {'Content-Type': 'application/x-www-form-urlencoded',
                'Authorization': 'Basic %s' % authorization,
                'Accept': '*/*'}

    # authorization must be a base64 string of username:password
    def _create_headers_encoded_prepared(self):
        authorization = self.get_authorization(self.username, self.password)
        return {'Content-Type': 'application/json',
                'Authorization': 'Basic %s' % authorization}

    def _get_auth(self, headers):
        """Decode and return the Basic auth credentials from *headers* (debug aid)."""
        key = 'Authorization'
        if key in headers:
            val = headers[key]
            if val.startswith("Basic "):
                try:
                    val = val.encode()
                    return str("auth: " + base64.decodebytes(val[6:]).decode())
                except Exception as e:
                    print(e)
        return ""

    def _http_request(self, api, method='GET', params='', headers=None,
                      timeout=120, disable_ssl_certificate_validation=True):
        """Core HTTP helper: retries with exponential backoff until *timeout*,
        returning (ok, content, response). ok is True for 200/201/202."""
        if not headers:
            headers = self._create_headers()
        end_time = time.time() + timeout
        log.debug("Executing {0} request for following api {1} with Params: {2} and Headers: {3}"\
                  .format(method, api, params, headers))
        count = 1
        t1 = 3
        while True:
            try:
                try:
                    if TestInputSingleton.input.param("debug.api.calls", False):
                        log.info("--->Start calling httplib2.Http({}).request({},{},{},{})".format(timeout,api,headers,method,params))
                except AttributeError:
                    pass
                response, content = httplib2.Http(timeout=timeout,
                                                  disable_ssl_certificate_validation=disable_ssl_certificate_validation).\
                    request(api, method, params, headers)
                try:
                    if TestInputSingleton.input.param("debug.api.calls", False):
                        log.info(
                            "--->End calling httplib2.Http({}).request({},{},{},{})".format(timeout, api, headers, method, params))
                except AttributeError:
                    pass
                if response['status'] in ['200', '201', '202']:
                    return True, content, response
                else:
                    # Non-success status: try to extract an error reason for the log.
                    try:
                        json_parsed = json.loads(content)
                    except ValueError as e:
                        json_parsed = {}
                        json_parsed["error"] = "status: {0}, content: {1}"\
                            .format(response['status'], content)
                    reason = "unknown"
                    if "error" in json_parsed:
                        reason = json_parsed["error"]
                    message = '{0} {1} body: {2} headers: {3} error: {4} reason: {5} {6} {7}'.\
                        format(method, api, params, headers,
                               response['status'], reason,
                               str(str(content).rstrip('\n')),
                               self._get_auth(headers))
                    log.error(message)
                    log.debug(''.join(traceback.format_stack()))
                    return False, content, response
            except socket.error as e:
                if count < 4:
                    log.error("socket error while connecting to {0} error {1} ".format(api, e))
                if time.time() > end_time:
                    log.error("Giving up due to {2}! Tried {0} connect {1} times.".format(
                        api, count, e))
                    raise ServerUnavailableException(ip=self.ip)
            except (AttributeError, httplib2.ServerNotFoundError) as e:
                if count < 4:
                    log.error("ServerNotFoundError error while connecting to {0} error {1} "\
                              .format(api, e))
                if time.time() > end_time:
                    log.error("Giving up due to {2}! Tried {0} connect {1} times.".\
                              format(api, count, e))
                    raise ServerUnavailableException(ip=self.ip)
            # Exponential backoff between retries: 3s, 6s, 12s, ...
            time.sleep(t1)
            count += 1
            t1 *= 2

    def init_cluster(self, username='Administrator', password='password', port='8091'):
        """POST settings/web to set admin credentials/port on this node."""
        log.info("--> in init_cluster...{},{},{}".format(username,password,port))
        api = self.baseUrl + 'settings/web'
        params = urllib.parse.urlencode({'port': port,
                                         'username': username,
                                         'password': password})
        log.info('settings/web params on {0}:{1}:{2}'.format(self.ip, self.port, params))
        status, content, header = self._http_request(api, 'POST', params=params)
        log.info("--> status:{}".format(status))
        return status

    def init_node(self, set_node_services=None):
        """ need a standalone method to initialize a node that could call
            anywhere with quota from testconstant """
        self.node_services = []
        if set_node_services is None:
            set_node_services = self.services_node_init
        # Resolve which services this node should run ("-" or "," separated).
        if set_node_services is None and self.services == "":
            self.node_services = ["kv"]
        elif set_node_services is None and self.services != "":
            self.node_services = self.services.split(",")
        elif set_node_services is not None:
            if "-" in set_node_services:
                self.node_services = set_node_services.split("-")
            if "," in set_node_services:
                self.node_services = set_node_services.split(",")
        # Wait until the node reports a non-zero reserved memcached quota.
        kv_quota = 0
        while kv_quota == 0:
            time.sleep(1)
            kv_quota = int(self.get_nodes_self().mcdMemoryReserved)
        info = self.get_nodes_self()
        kv_quota = int(info.mcdMemoryReserved * CLUSTER_QUOTA_RATIO)
        cb_version = info.version[:5]
        if cb_version in COUCHBASE_FROM_VERSION_4:
            # Carve per-service quotas out of the KV allowance.
            if "index" in self.node_services:
                log.info("quota for index service will be %s MB" % (INDEX_QUOTA))
                kv_quota -= INDEX_QUOTA
                log.info("set index quota to node %s " % self.ip)
                self.set_service_memoryQuota(service='indexMemoryQuota',
                                             memoryQuota=INDEX_QUOTA)
            if "fts" in self.node_services:
                log.info("quota for fts service will be %s MB" % (FTS_QUOTA))
                kv_quota -= FTS_QUOTA
                log.info("set both index and fts quota at node %s "% self.ip)
                self.set_service_memoryQuota(service='ftsMemoryQuota',
                                             memoryQuota=FTS_QUOTA)
            if "cbas" in self.node_services:
                log.info("quota for cbas service will be %s MB" % (CBAS_QUOTA))
                kv_quota -= CBAS_QUOTA
                self.set_service_memoryQuota(service = "cbasMemoryQuota",
                                             memoryQuota=CBAS_QUOTA)
        kv_quota -= 1
        if kv_quota < MIN_KV_QUOTA:
            raise Exception("KV RAM needs to be more than %s MB"
                            " at node %s" % (MIN_KV_QUOTA, self.ip))
        log.info("quota for kv: %s MB" % kv_quota)
        self.init_cluster_memoryQuota(self.username, self.password, kv_quota)
        if cb_version in COUCHBASE_FROM_VERSION_4:
            self.init_node_services(username=self.username,
                                    password=self.password,
                                    services=self.node_services)
        self.init_cluster(username=self.username,
                          password=self.password)
        return kv_quota

    def init_node_services(self, username='Administrator', password='password',
                           hostname='127.0.0.1', port='8091', services=None):
        """POST node/controller/setupServices to assign services to this node."""
        if CbServer.use_https:
            port = CbServer.ssl_port_map.get(str(port), str(port))
        log.info("--> init_node_services({},{},{},{},{})".format(username,password,hostname,port,services))
        api = self.baseUrl + '/node/controller/setupServices'
        if services == None:
            log.info(" services are marked as None, will not work")
            return False
        # NOTE(review): params_dict is built but never used — dead code.
        params_dict = {'user': username,
                       'password': password,
                       'services': ",".join(services)}
        if hostname == "127.0.0.1":
            hostname = "{0}:{1}".format(hostname, port)
        params = urllib.parse.urlencode({ 'hostname': hostname,
                                          'user': username,
                                          'password': password,
                                          'services': ",".join(services)})
        log.info('/node/controller/setupServices params on {0}: {1}:{2}'.format(self.ip, self.port, params))
        status, content, header = self._http_request(api, 'POST', params)
        error_message = "cannot change node services after cluster is provisioned"
        if not status and error_message in str(content):
            # Already provisioned: treat as success for test purposes.
            status = True
            log.info("This node is already provisioned with services, we do not consider this as failure for test case")
        return status

    def get_cluster_settings(self):
        """GET settings/web and return the parsed settings dict (empty on failure)."""
        settings = {}
        api = self.baseUrl + 'settings/web'
        status, content, header = self._http_request(api, 'GET')
        if status:
            settings = json.loads(content)
        log.info('settings/web params on {0}:{1}:{2}'.format(self.ip, self.port, settings))
        return settings

    def init_cluster_memoryQuota(self, username='Administrator',
                                 password='password',
                                 memoryQuota=256):
        """POST pools/default to set the KV memory quota (MB)."""
        api = self.baseUrl + 'pools/default'
        params = urllib.parse.urlencode({'memoryQuota': memoryQuota})
        log.info('pools/default params : {0}'.format(params))
        status, content, header = self._http_request(api, 'POST', params)
        return status

    def set_service_memoryQuota(self, service, username='Administrator',
                                password='password',
                                memoryQuota=256):
        ''' cbasMemoryQuota for cbas service.
            ftsMemoryQuota for fts service.
            indexMemoryQuota for index service.'''
        api = self.baseUrl + 'pools/default'
        params = urllib.parse.urlencode({service: memoryQuota})
        log.info('pools/default params : {0}'.format(params))
        status, content, header = self._http_request(api, 'POST', params)
        return status

    def set_cluster_name(self, name):
        """POST pools/default to set the cluster display name ("" clears it)."""
        api = self.baseUrl + 'pools/default'
        if name is None:
            name = ""
        params = urllib.parse.urlencode({'clusterName': name})
        log.info('pools/default params : {0}'.format(params))
        status, content, header = self._http_request(api, 'POST', params)
        return status

    def set_indexer_storage_mode(self, username='Administrator',
                                 password='password',
                                 storageMode='plasma'):
        """ StorageMode could be plasma or memopt
            From spock, we replace forestdb with plasma """
        api = self.baseUrl + 'settings/indexes'
        params = urllib.parse.urlencode({'storageMode': storageMode})
        error_message = "storageMode must be one of plasma, memory_optimized"
        log.info('settings/indexes params : {0}'.format(params))
        status, content, header = self._http_request(api, 'POST', params)
        if not status and error_message in content.decode():
            #TODO: Currently it just acknowledges if there is an error.
            #And proceeds with further initialization.
            log.info(content)
        return status

    def set_indexer_num_replica(self, num_replica=0):
        """POST indexer settings to set the number of index replicas."""
        api = self.index_baseUrl + 'settings'
        params = {'indexer.settings.num_replica': num_replica}
        params = json.dumps(params)
        status, content, header = self._http_request(api, 'POST',
                                                     params=params,
                                                     timeout=60)
        error_message = ""
        log.info('settings params : {0}'.format(params))
        # NOTE(review): the same settings POST is issued a second time here;
        # looks like a copy/paste leftover. Also `error_message in content`
        # with error_message == "" is always True for a falsy status.
        status, content, header = self._http_request(api, 'POST', params)
        if not status and error_message in content:
            # TODO: Currently it just acknowledges if there is an error.
            # And proceeds with further initialization.
            log.info(content)
        return status

    def cleanup_indexer_rebalance(self, server):
        """GET cleanupRebalance on the indexer (or this node) and return body."""
        protocol = "http"
        if CbServer.use_https:
            protocol = "https"
        if server:
            api = "{0}://{1}:{2}/".format(protocol, server.ip, self.index_port) + 'cleanupRebalance'
        else:
            api = self.baseUrl + 'cleanupRebalance'
        status, content, _ = self._http_request(api, 'GET')
        if status:
            return content
        else:
            log.error("cleanupRebalance:{0},content:{1}".format(status, content))
            raise Exception("indexer rebalance cleanup failed")

    def list_indexer_rebalance_tokens(self, server):
        """GET listRebalanceTokens from the indexer and return the decoded body."""
        protocol = "http"
        if CbServer.use_https:
            protocol = "https"
        if server:
            api = "{0}://{1}:{2}/".format(protocol, server.ip, self.index_port) + 'listRebalanceTokens'
        else:
            api = self.baseUrl + 'listRebalanceTokens'
        print(api)
        status, content, _ = self._http_request(api, 'GET')
        if status:
            return content.decode('utf-8')
        else:
            log.error("listRebalanceTokens:{0},content:{1}".format(status, content))
            raise Exception("list rebalance tokens failed")

    def wait_until_cbas_is_ready(self, timeout):
        """ Wait until a http request can be made to the analytics service """
        # NOTE(review): `timeout` is reused as the absolute deadline here.
        timeout = time.time() + timeout
        while time.time() < timeout:
            try:
                self.execute_statement_on_cbas("SELECT 'hello' as message", None)
                return True
            except ServerUnavailableException:
                self.sleep(1, "Waiting for analytics server to be ready")
        return False

    def execute_statement_on_cbas(self, statement, mode, pretty=True,
                                  timeout=70, client_context_id=None,
                                  username=None, password=None):
        """POST an analytics (CBAS) statement; return the raw response body."""
        if not username:
            username = self.username
        if not password:
            password = self.password
        api = self.cbas_base_url + "/analytics/service"
        headers = self._create_capi_headers_with_auth(username, password)
        params = {'statement': statement, 'pretty': pretty,
                  'client_context_id': client_context_id}
        if mode is not None:
            params['mode'] = mode
        params = json.dumps(params)
        status, content, header = self._http_request(api, 'POST',
                                                     headers=headers,
                                                     params=params,
                                                     timeout=timeout)
        if status:
            return content
        elif str(header['status']) == '503':
            log.info("Request Rejected")
            raise Exception("Request Rejected")
        elif str(header['status']) in ['500', '400']:
            json_content = json.loads(content)
            msg = json_content['errors'][0]['msg']
            if "Job requirement" in msg and "exceeds capacity" in msg:
                raise Exception("Capacity cannot meet job requirement")
            else:
                return content
        else:
            log.error("/analytics/service status:{0},content:{1}".format(
                status, content))
            raise Exception("Analytics Service API failed")

    def delete_active_request_on_cbas(self, client_context_id, username=None, password=None):
        """DELETE an in-flight analytics request by client_context_id."""
        if not username:
            username = self.username
        if not password:
            password = self.password
        api = self.cbas_base_url + "/analytics/admin/active_requests?client_context_id={0}".format(
            client_context_id)
        headers = self._create_capi_headers_with_auth(username, password)
        status, content, header = self._http_request(api, 'DELETE',
                                                     headers=headers,
                                                     timeout=60)
        if status:
            return header['status']
        elif str(header['status']) == '404':
            log.info("Request Not Found")
            return header['status']
        else:
            log.error(
                "/analytics/admin/active_requests status:{0},content:{1}".format(
                    status, content))
            raise Exception("Analytics Admin API failed")

    # NOTE(review): method name is misspelled ("ceritificate") but kept for
    # backwards compatibility with existing callers.
    def get_cluster_ceritificate(self):
        api = self.baseUrl + 'pools/default/certificate'
        status, content, _ = self._http_request(api, 'GET')
        if status:
            return content.decode("utf-8")
        else:
            log.error("/pools/default/certificate status:{0},content:{1}".format(status, content))
            raise Exception("certificate API failed")

    def regenerate_cluster_certificate(self):
        """POST controller/regenerateCertificate and return the new certificate."""
        api = self.baseUrl + 'controller/regenerateCertificate'
        status, content, _ = self._http_request(api, 'POST')
        if status:
            return content
        else:
            log.error("controller/regenerateCertificate status:{0},content:{1}".format(status, content))
            raise Exception("regenerateCertificate API failed")

    def __remote_clusters(self, api, op, remoteIp, remotePort, username, password, name,
                          demandEncryption=0, certificate='', encryptionType="half"):
        """Shared add/modify implementation for XDCR remote-cluster references."""
        param_map = {'hostname': "{0}:{1}".format(remoteIp, remotePort),
                     'username': username,
                     'password': password,
                     'name':name}
        from TestInput import TestInputServer
        remote = TestInputServer()
        remote.ip = remoteIp
        remote.rest_username = username
        remote.rest_password = password
        remote.port = remotePort
        if demandEncryption:
            param_map ['demandEncryption'] = 'on'
            if certificate != '':
                param_map['certificate'] = certificate
        if self.check_node_versions("5.5") and RestConnection(remote).check_node_versions("5.5"):
            # 5.5.0 and above
            param_map['secureType'] = encryptionType
        elif self.check_node_versions("5.0") and RestConnection(remote).check_node_versions("5.0"):
            param_map['encryptionType'] = encryptionType
        params = urllib.parse.urlencode(param_map)
        retries = 5
        while retries:
            status, content, _ = self._http_request(api, 'POST', params)
            # sample response :
            # [{"name":"two","uri":"/pools/default/remoteClusters/two","validateURI":"/pools/default/remoteClusters/two?just_validate=1","hostname":"127.0.0.1:9002","username":"Administrator"}]
            remoteCluster = json.loads(content)
            # NOTE(review): remoteCluster["_"] assumes an error dict with key "_";
            # a list response (success shape) on a falsy status would raise here.
            if status or "Duplicate cluster" in remoteCluster["_"]:
                return remoteCluster
            retries -= 1
        raise Exception("remoteCluster API '{0} remote cluster' failed".format(op))

    def add_remote_cluster(self, remoteIp, remotePort, username, password, name,
                           demandEncryption=0, certificate='', encryptionType="full"):
        # example : password:password username:Administrator hostname:127.0.0.1:9002 name:two
        msg = "adding remote cluster hostname:{0}:{1} with username:password {2}:{3} name:{4} to source node: {5}:{6}"
        log.info(msg.format(remoteIp, remotePort, username, password, name, self.ip, self.port))
        api = self.baseUrl + 'pools/default/remoteClusters'
        return self.__remote_clusters(api, 'add', remoteIp, remotePort, username, password, name,
                                      demandEncryption, certificate, encryptionType)

    def add_remote_cluster_new(self, remoteIp, remotePort, username, password, name,
                               demandEncryption=0, certificate=''):
        # example : password:password username:Administrator hostname:127.0.0.1:9002 name:two
        msg = "adding remote cluster hostname:{0}:{1} with username:password {2}:{3} name:{4} to source node: {5}:{6}"
        log.info(msg.format(remoteIp, remotePort, username, password, name, self.ip, self.port))
        api = self.baseUrl + 'pools/default/remoteClusters'
        return self.__remote_clusters(api, 'add', remoteIp, remotePort, username, password, name,
                                      demandEncryption, certificate)

    def modify_remote_cluster(self, remoteIp, remotePort, username, password, name,
                              demandEncryption=0, certificate='', encryptionType="half"):
        log.info("modifying remote cluster name:{0}".format(name))
        api = self.baseUrl + 'pools/default/remoteClusters/' + urllib.parse.quote(name)
        return self.__remote_clusters(api, 'modify', remoteIp, remotePort, username, password, name,
                                      demandEncryption, certificate, encryptionType)

    def get_remote_clusters(self):
        """GET all remote-cluster references (empty list on failure)."""
        remote_clusters = []
        api = self.baseUrl + 'pools/default/remoteClusters/'
        params = urllib.parse.urlencode({})
        status, content, header = self._http_request(api, 'GET', params)
        if status:
            remote_clusters = json.loads(content)
        return remote_clusters

    def remove_all_remote_clusters(self):
        """Delete every non-deleted remote-cluster reference."""
        remote_clusters = self.get_remote_clusters()
        for remote_cluster in remote_clusters:
            try:
                if remote_cluster["deleted"] == False:
                    self.remove_remote_cluster(remote_cluster["name"])
            except KeyError:
                # goxdcr cluster references will not contain "deleted" field
self.remove_remote_cluster(remote_cluster["name"]) def remove_remote_cluster(self, name): # example : name:two msg = "removing remote cluster name:{0}".format(urllib.parse.quote(name)) log.info(msg) api = self.baseUrl + 'pools/default/remoteClusters/{0}?'.format(urllib.parse.quote(name)) params = urllib.parse.urlencode({}) status, content, header = self._http_request(api, 'DELETE', params) #sample response : "ok" if not status: log.error("failed to remove remote cluster: status:{0},content:{1}".format(status, content)) raise Exception("remoteCluster API 'remove cluster' failed") # replicationType:continuous toBucket:default toCluster:two fromBucket:default # defaults at https://github.com/couchbase/goxdcr/metadata/replication_settings.go#L20-L33 def start_replication(self, replicationType, fromBucket, toCluster, rep_type="xmem", toBucket=None, xdcr_params={}): toBucket = toBucket or fromBucket msg = "starting {0} replication type:{1} from {2} to {3} in the remote" \ " cluster {4} with settings {5}" log.info(msg.format(replicationType, rep_type, fromBucket, toBucket, toCluster, xdcr_params)) api = self.baseUrl + 'controller/createReplication' param_map = {'replicationType': replicationType, 'toBucket': toBucket, 'fromBucket': fromBucket, 'toCluster': toCluster, 'type': rep_type} param_map.update(xdcr_params) params = urllib.parse.urlencode(param_map) retries = 3 while retries: try: status, content, header = self._http_request(api, 'POST', params) # response : {"id": "replication_id"} json_parsed = json.loads(content) log.info("Replication created with id: {0}".format(json_parsed['id'])) return json_parsed['id'] except ValueError: time.sleep(10) retries -= 1 except: raise Exception("create replication failed: status:{0},content:{1}".format(status, content)) def get_replications(self): replications = [] content = self.ns_server_tasks() for item in content: if not isinstance(item, dict): log.error("Unexpected error while retrieving pools/default/tasks : 
{0}".format(content)) raise Exception("Unexpected error while retrieving pools/default/tasks : {0}".format(content)) if item["type"] == "xdcr": replications.append(item) return replications def remove_all_replications(self): replications = self.get_replications() for replication in replications: self.stop_replication(replication["cancelURI"]) def stop_replication(self, uri): log.info("Deleting replication {0}".format(uri)) api = self.baseUrl[:-1] + uri retries = 3 while retries: status, content, header = self._http_request(api, 'DELETE') if status: log.info("Replication deleted successfully") return else: retries -= 1 time.sleep(10) raise Exception("delete replication failed: status:{0}, content:{1}".format(status, content)) def remove_all_recoveries(self): recoveries = [] content = self.ns_server_tasks() for item in content: if item["type"] == "recovery": recoveries.append(item) for recovery in recoveries: api = self.baseUrl + recovery["stopURI"] status, content, header = self._http_request(api, 'POST') if not status: raise CBRecoveryFailedException("impossible to stop cbrecovery by {0}".format(api)) log.info("recovery stopped by {0}".format(api)) # params serverIp : the server to add to this cluster # raises exceptions when # unauthorized user # server unreachable # can't add the node to itself ( TODO ) # server already added # returns otpNode def add_node(self, user='', password='', remoteIp='', port='8091', zone_name='', services=None): otpNode = None protocol = "http" if CbServer.use_https or CbServer.n2n_encryption: port = CbServer.ssl_port protocol = "https" # if ip format is ipv6 and enclosing brackets are not found, # enclose self.ip and remoteIp if self.ip.count(':') and self.ip[0] != '[': self.ip = '[' + self.ip + ']' if remoteIp.count(':') and remoteIp[0] != '[': remoteIp = '[' + remoteIp + ']' log.info('adding remote node @{0}:{1} to this cluster @{2}:{3}'\ .format(remoteIp, port, self.ip, self.port)) if zone_name == '': api = self.baseUrl + 
'controller/addNode' else: api = self.baseUrl + 'pools/default/serverGroups' if self.is_zone_exist(zone_name): zones = self.get_zone_names() api = "/".join((api, zones[zone_name], "addNode")) log.info("node {0} will be added to zone {1}".format(remoteIp, zone_name)) else: raise Exception("There is not zone with name: %s in cluster" % zone_name) params = urllib.parse.urlencode({'hostname': "{0}://{1}:{2}".format(protocol, remoteIp, port), 'user': user, 'password': password}) if services != None: services = ','.join(services) params = urllib.parse.urlencode({'hostname': "{0}://{1}:{2}".format(protocol, remoteIp, port), 'user': user, 'password': password, 'services': services}) if self.monitorRebalance(): status, content, header = self._http_request(api, 'POST', params) if status: json_parsed = json.loads(content) otpNodeId = json_parsed['otpNode'] otpNode = OtpNode(otpNodeId) if otpNode.ip == '127.0.0.1': otpNode.ip = self.ip else: self.print_UI_logs() try: # print logs from node that we want to add wanted_node = deepcopy(self) wanted_node.ip = remoteIp wanted_node.print_UI_logs() except Exception as ex: self.log(ex) if content.find(b'Prepare join failed. Node is already part of cluster') >= 0: raise ServerAlreadyJoinedException(nodeIp=self.ip, remoteIp=remoteIp) elif content.find(b'Prepare join failed. 
Joining node to itself is not allowed') >= 0: raise ServerSelfJoinException(nodeIp=self.ip, remoteIp=remoteIp) else: log.error('add_node error : {0}'.format(content)) raise AddNodeException(nodeIp=self.ip, remoteIp=remoteIp, reason=content) else: raise AddNodeException(nodeIp=self.ip, remoteIp=remoteIp, reason="Rebalance error, cannot add node") return otpNode # params serverIp : the server to add to this cluster # raises exceptions when # unauthorized user # server unreachable # can't add the node to itself ( TODO ) # server already added # returns otpNode def do_join_cluster(self, user='', password='', remoteIp='', port='8091', zone_name='', services=None): otpNode = None if CbServer.use_https: port = CbServer.ssl_port log.info('adding remote node @{0}:{1} to this cluster @{2}:{3}'\ .format(remoteIp, port, self.ip, self.port)) api = self.baseUrl + '/node/controller/doJoinCluster' params = urllib.parse.urlencode({'hostname': "{0}:{1}".format(remoteIp, port), 'user': user, 'password': password}) if services != None: services = ','.join(services) params = urllib.parse.urlencode({'hostname': "{0}:{1}".format(remoteIp, port), 'user': user, 'password': password, 'services': services}) status, content, header = self._http_request(api, 'POST', params) if status: json_parsed = json.loads(content) otpNodeId = json_parsed['otpNode'] otpNode = OtpNode(otpNodeId) if otpNode.ip == '127.0.0.1': otpNode.ip = self.ip else: self.print_UI_logs() try: # print logs from node that we want to add wanted_node = deepcopy(self) wanted_node.ip = remoteIp wanted_node.print_UI_logs() except Exception as ex: self.log(ex) if content.find('Prepare join failed. Node is already part of cluster') >= 0: raise ServerAlreadyJoinedException(nodeIp=self.ip, remoteIp=remoteIp) elif content.find('Prepare join failed. 
Joining node to itself is not allowed') >= 0: raise ServerSelfJoinException(nodeIp=self.ip, remoteIp=remoteIp) else: log.error('add_node error : {0}'.format(content)) raise AddNodeException(nodeIp=self.ip, remoteIp=remoteIp, reason=content) return otpNode def eject_node(self, user='', password='', otpNode=None): if not otpNode: log.error('otpNode parameter required') return False api = self.baseUrl + 'controller/ejectNode' params = urllib.parse.urlencode({'otpNode': otpNode, 'user': user, 'password': password}) status, content, header = self._http_request(api, 'POST', params) if status: log.info('ejectNode successful') else: if content.find('Prepare join failed. Node is already part of cluster') >= 0: raise ServerAlreadyJoinedException(nodeIp=self.ip, remoteIp=otpNode) else: # TODO : raise an exception here log.error('eject_node error {0}'.format(content)) return True def force_eject_node(self): self.diag_eval("gen_server:cast(ns_cluster, leave).") self.check_delay_restart_coucbase_server() """ when we do reset couchbase server by force reject, couchbase server will not down right away but delay few seconds to be down depend on server spec. 
This fx will detect that delay and return true when couchbase server down and up again after force reject """ def check_delay_restart_coucbase_server(self): api = self.baseUrl + 'nodes/self' headers = self._create_headers() break_out = 0 count_cbserver_up = 0 while break_out < 60 and count_cbserver_up < 2: try: response, content = httplib2.Http(timeout=120).request(api, 'GET', '', headers) if response['status'] in ['200', '201', '202'] and count_cbserver_up == 0: log.info("couchbase server is up but down soon.") time.sleep(1) break_out += 1 # time needed for couchbase server reload after reset config if break_out == 7: log.info("couchbase server may be up already") count_cbserver_up = 1 elif response['status'] in ['200', '201', '202']: count_cbserver_up = 2 log.info("couchbase server is up again in few seconds") time.sleep(7) except (socket.error, AttributeError) as e: log.info("couchbase server is down. Waiting for couchbase server up") time.sleep(2) break_out += 1 count_cbserver_up = 1 pass if break_out >= 60: raise Exception("Couchbase server did not start after 60 seconds") def fail_over(self, otpNode=None, graceful=False): if otpNode is None: log.error('otpNode parameter required') return False api = self.baseUrl + 'controller/failOver' if graceful: api = self.baseUrl + 'controller/startGracefulFailover' params = urllib.parse.urlencode({'otpNode': otpNode}) status, content, header = self._http_request(api, 'POST', params) if status: log.info('fail_over node {0} successful'.format(otpNode)) else: log.error('fail_over node {0} error : {1}'.format(otpNode, content)) raise FailoverFailedException(content) return status def set_recovery_type(self, otpNode=None, recoveryType=None): log.info("Going to set recoveryType={0} for node :: {1}".format(recoveryType, otpNode)) if otpNode is None: log.error('otpNode parameter required') return False if recoveryType is None: log.error('recoveryType is not set') return False api = self.baseUrl + 'controller/setRecoveryType' 
params = urllib.parse.urlencode({'otpNode': otpNode, 'recoveryType': recoveryType}) status, content, header = self._http_request(api, 'POST', params) if status: log.info('recoveryType for node {0} set successful'.format(otpNode)) else: log.error('recoveryType node {0} not set with error : {1}'.format(otpNode, content)) raise SetRecoveryTypeFailed(content) return status def add_back_node(self, otpNode=None): if otpNode is None: log.error('otpNode parameter required') return False api = self.baseUrl + 'controller/reAddNode' params = urllib.parse.urlencode({'otpNode': otpNode}) status, content, header = self._http_request(api, 'POST', params) if status: log.info('add_back_node {0} successful'.format(otpNode)) else: log.error('add_back_node {0} error : {1}'.format(otpNode, content)) raise InvalidArgumentException('controller/reAddNode', parameters=params) return status def rebalance(self, otpNodes=[], ejectedNodes=[], deltaRecoveryBuckets=None): knownNodes = ','.join(otpNodes) ejectedNodesString = ','.join(ejectedNodes) if deltaRecoveryBuckets == None: params = {'knownNodes': knownNodes, 'ejectedNodes': ejectedNodesString, 'user': self.username, 'password': self.password} else: deltaRecoveryBuckets = ",".join(deltaRecoveryBuckets) params = {'knownNodes': knownNodes, 'ejectedNodes': ejectedNodesString, 'deltaRecoveryBuckets': deltaRecoveryBuckets, 'user': self.username, 'password': self.password} log.info('rebalance params : {0}'.format(params)) params = urllib.parse.urlencode(params) api = self.baseUrl + "controller/rebalance" status, content, header = self._http_request(api, 'POST', params) if status: log.info('rebalance operation started') else: log.error('rebalance operation failed: {0}'.format(content)) # extract the error raise InvalidArgumentException('controller/rebalance with error message {0}'.format(content), parameters=params) return status def diag_eval(self, code, print_log=True): api = '{0}{1}'.format(self.baseUrl, 'diag/eval/') status, content, header = 
self._http_request(api, "POST", code) if content: try: content = content.decode('utf-8') except (UnicodeDecodeError, AttributeError): pass if print_log: log.info("/diag/eval status on {0}:{1}: {2} content: {3} command: {4}". format(self.ip, self.port, status, content, code)) return status, content def set_chk_max_items(self, max_items): status, content = self.diag_eval("ns_config:set(chk_max_items, " + str(max_items) + ")") return status, content def set_chk_period(self, period): status, content = self.diag_eval("ns_config:set(chk_period, " + str(period) + ")") return status, content def set_enable_flow_control(self, flow=True, bucket='default'): flow_control = "false" if flow: flow_control = "true" code = "ns_bucket:update_bucket_props(\"" + bucket + "\", [{extra_config_string, \"upr_enable_flow_control=" + flow_control + "\"}])" status, content = self.diag_eval(code) return status, content def change_flusher_total_batch_limit(self, flusher_total_batch_limit=3, bucket='default'): code = "ns_bucket:update_bucket_props(\"" + bucket \ + "\", [{extra_config_string, " \ + "\"flusher_total_batch_limit=" \ + str(flusher_total_batch_limit) + "\"}])." 
status, content = self.diag_eval(code) return status, content def diag_master_events(self): api = '{0}{1}'.format(self.baseUrl, 'diag/masterEvents?o=1') status, content, header = self._http_request(api, "GET") log.info("diag/masterEvents?o=1 status: {0} content: {1}".format(status, content)) return status, content def get_admin_credentials(self): code = 'ns_config:search_node_prop(node(), ns_config:latest(), memcached, admin_user)' status, id = self.diag_eval(code) code = 'ns_config:search_node_prop(node(), ns_config:latest(), memcached, admin_pass)' status, password = self.diag_eval(code) return id.strip('"'), password.strip('"') def monitorRebalance(self, stop_if_loop=True): start = time.time() progress = 0 retry = 0 same_progress_count = 0 previous_progress = 0 while progress != -1 and (progress != 100 or \ self._rebalance_progress_status() == 'running') and retry < 20: # -1 is error , -100 means could not retrieve progress progress = self._rebalance_progress() if progress == -100: log.error("unable to retrieve rebalanceProgress.try again in 1 second") retry += 1 else: retry = 0 if stop_if_loop: # reset same_progress_count if get a different result, # or progress is still O # (it may take a long time until the results are different from 0) if previous_progress != progress or progress == 0: previous_progress = progress same_progress_count = 0 else: same_progress_count += 1 if same_progress_count > 50: log.error("apparently rebalance progress code in infinite loop:" " {0}".format(progress)) return False # sleep 10 seconds to printout less log time.sleep(10) if progress < 0: log.error("rebalance progress code : {0}".format(progress)) return False else: duration = time.time() - start if duration > 10: sleep = 10 else: sleep = duration log.info('rebalance progress took {:.02f} seconds '.format(duration)) log.info("sleep for {0} seconds after rebalance...".format(sleep)) time.sleep(sleep) return True def _rebalance_progress_status(self): api = self.baseUrl + 
"pools/default/rebalanceProgress" status, content, header = self._http_request(api) json_parsed = json.loads(content) if status: if "status" in json_parsed: return json_parsed['status'] else: return None def _rebalance_status_and_progress(self): """ Returns a 2-tuple capturing the rebalance status and progress, as follows: ('running', progress) - if rebalance is running ('none', 100) - if rebalance is not running (i.e. assumed done) (None, -100) - if there's an error getting the rebalance progress from the server (None, -1) - if the server responds but there's no information on what the status of rebalance is The progress is computed as a average of the progress of each node rounded to 2 decimal places. Throws RebalanceFailedException if rebalance progress returns an error message """ avg_percentage = -1 rebalance_status = None api = self.baseUrl + "pools/default/rebalanceProgress" try: status, content, header = self._http_request(api) except ServerUnavailableException as e: log.error(e) return None, -100 json_parsed = json.loads(content) if status: if "status" in json_parsed: rebalance_status = json_parsed["status"] if "errorMessage" in json_parsed: msg = '{0} - rebalance failed'.format(json_parsed) log.error(msg) self.print_UI_logs() raise RebalanceFailedException(msg) elif rebalance_status == "running": total_percentage = 0 count = 0 for key in json_parsed: if key.find('@') >= 0: ns_1_dictionary = json_parsed[key] percentage = ns_1_dictionary['progress'] * 100 count += 1 total_percentage += percentage if count: avg_percentage = (total_percentage // count) else: avg_percentage = 0 log.info('rebalance percentage : {0:.02f} %'. 
format(round(avg_percentage, 2))) else: avg_percentage = 100 else: avg_percentage = -100 return rebalance_status, avg_percentage def _rebalance_progress(self): return self._rebalance_status_and_progress()[1] def log_client_error(self, post): api = self.baseUrl + 'logClientError' status, content, header = self._http_request(api, 'POST', post) if not status: log.error('unable to logClientError') return status, content, header def trigger_index_compaction(self, timeout=120): node = None api = self.index_baseUrl + 'triggerCompaction' status, content, header = self._http_request(api, timeout=timeout) if not status: raise Exception(content) def set_index_settings(self, setting_json, timeout=120): api = self.index_baseUrl + 'settings' status, content, header = self._http_request(api, 'POST', json.dumps(setting_json)) if not status: raise Exception(content) log.info("{0} set".format(setting_json)) def set_index_settings_internal(self, setting_json, timeout=120): api = self.index_baseUrl + 'internal/settings' status, content, header = self._http_request(api, 'POST', json.dumps(setting_json)) if not status: if header['status']=='404': log.info("This endpoint is introduced only in 5.5.0, hence not found. 
Redirecting the request to the old endpoint") self.set_index_settings(setting_json, timeout) else: raise Exception(content) log.info("{0} set".format(setting_json)) def get_index_settings(self, timeout=120): node = None api = self.index_baseUrl + 'settings' status, content, header = self._http_request(api, timeout=timeout) if not status: raise Exception(content) return json.loads(content) def get_index_storage_mode(self, timeout=120): api = self.index_baseUrl + 'settings' status, content, header = self._http_request(api, timeout=timeout) if not status: raise Exception(content) return json.loads(content)["indexer.settings.storage_mode"] def set_index_planner_settings(self, setting, timeout=120): api = self.index_baseUrl + 'settings/planner?{0}'.format(setting) status, content, header = self._http_request(api, timeout=timeout) if not status: raise Exception(content) return json.loads(content) def get_index_stats(self, timeout=120, index_map=None): api = self.index_baseUrl + 'stats' status, content, header = self._http_request(api, timeout=timeout) if status: json_parsed = json.loads(content) index_map = RestParser().parse_index_stats_response(json_parsed, index_map=index_map) return index_map def get_index_stats_collections(self, timeout=120, index_map=None): api = self.index_baseUrl + 'stats' status, content, header = self._http_request(api, timeout=timeout) if status: json_parsed = json.loads(content) index_map = RestParser().parse_index_stats_response_collections(json_parsed, index_map=index_map) return index_map def get_all_index_stats(self, timeout=120, inst_id_filter=[], consumer_filter=None, text=False): """return: json object or text response of :9102/stats""" api = self.index_baseUrl + 'stats' all_index_stats = {} if inst_id_filter: inst_id_filter = json.dumps(inst_id_filter) elif consumer_filter: api += f"?consumerFilter={consumer_filter}" else: inst_id_filter = "" status, content, _ = self._http_request(api, timeout=timeout, params=inst_id_filter) if 
status: if text: all_index_stats = content.decode("utf8").replace('":', '": ').replace(",", ", ") else: all_index_stats = json.loads(content) return all_index_stats def get_index_official_stats(self, timeout=120, index_map=None, bucket="", scope="", collection=""): api = self.index_baseUrl + 'api/v1/stats' if bucket: api += f'/`{bucket.replace('%', '%25')}`' if scope: api += f'.{scope}' if collection: api += f'.{collection}' status, content, header = self._http_request(api, timeout=timeout) if status: json_parsed = json.loads(content) return json_parsed def get_indexes_count(self): indexes_count = {} index_map = self.get_index_storage_stats() for bucket, indexes in index_map.items(): for index, stats in indexes.items(): indexes_count[index] = stats["MainStore"]["count"] return indexes_count def get_index_storage_stats(self, timeout=120, index_map=None): api = self.index_baseUrl + 'stats/storage' status, content, header = self._http_request(api, timeout=timeout) if not status: raise Exception(content) json_parsed = json.loads(content) index_storage_stats = {} for index_stats in json_parsed: bucket = index_stats["Index"].split(":")[0] index_name = index_stats["Index"].split(":")[-1] if bucket not in list(index_storage_stats.keys()): index_storage_stats[bucket] = {} index_storage_stats[bucket][index_name] = index_stats["Stats"] return index_storage_stats def get_indexer_stats(self, timeout=120, index_map=None, baseUrl=None): if baseUrl is None: api = self.index_baseUrl + 'stats' else: api = baseUrl + 'stats' index_map = {} status, content, header = self._http_request(api, timeout=timeout) if status: json_parsed = json.loads(content) for key in list(json_parsed.keys()): tokens = key.split(":") val = json_parsed[key] if len(tokens) == 1: field = tokens[0] index_map[field] = val return index_map def get_indexer_metadata(self, timeout=120, index_map=None): api = self.index_baseUrl + 'getIndexStatus' index_map = {} status, content, header = self._http_request(api, 
timeout=timeout) if status: json_parsed = json.loads(content) for key in list(json_parsed.keys()): tokens = key.split(":") val = json_parsed[key] if len(tokens) == 1: field = tokens[0] index_map[field] = val return index_map def get_indexer_internal_stats(self, timeout=120, index_map=None): api = self.index_baseUrl + 'settings?internal=ok' index_map = {} status, content, header = self._http_request(api, timeout=timeout) if status: json_parsed = json.loads(content) for key in list(json_parsed.keys()): tokens = key.split(":") val = json_parsed[key] if len(tokens) == 1: field = tokens[0] index_map[field] = val return index_map def trigger_compaction(self, timeout=120): api = self.index_baseUrl + 'plasmaDiag' command = {'Cmd': 'listDBs'} status, content, header = self._http_request(api, 'POST', json.dumps(command), timeout=timeout) for l in list(iter(str(content, 'utf-8').splitlines())): try: x, id = l.split(" : ") if id: log.info(f'Triggering compaction for instance id {id}') compact_command = {'Cmd': 'compactAll', 'Args': [int(id)]} status, content, header = self._http_request(api, 'POST', json.dumps(compact_command)) if not status: log.error(f'Failed to trigger compaction : {content}') except ValueError: pass def get_index_status(self, timeout=120, index_map=None): api = self.baseUrl + 'indexStatus' index_map = {} status, content, header = self._http_request(api, timeout=timeout) if status: json_parsed = json.loads(content) index_map = RestParser().parse_index_status_response(json_parsed) return index_map def get_index_id_map(self, timeout=120): api = self.baseUrl + 'indexStatus' index_map = {} status, content, header = self._http_request(api, timeout=timeout) if status: json_parsed = json.loads(content) for map in json_parsed["indexes"]: bucket_name = map['bucket'] if bucket_name not in list(index_map.keys()): index_map[bucket_name] = {} index_name = map['index'] index_map[bucket_name][index_name] = {} index_map[bucket_name][index_name]['id'] = map['id'] return 
index_map def get_index_statements(self, timeout=120): api = self.index_baseUrl + 'getIndexStatement' index_map = {} status, content, header = self._http_request(api, timeout=timeout) if status: json_parsed = json.loads(content) return json_parsed # returns node data for this host def get_nodes_self(self, timeout=120): node = None api = self.baseUrl + 'nodes/self' status, content, header = self._http_request(api, timeout=timeout) if status: json_parsed = json.loads(content) node = RestParser().parse_get_nodes_response(json_parsed) return node def get_ip_from_ini_file(self): """ in alternate address, we need to get hostname from ini file """ return self.ip def node_statuses(self, timeout=120): nodes = [] api = self.baseUrl + 'nodeStatuses' status, content, header = self._http_request(api, timeout=timeout) json_parsed = json.loads(content) if status: for key in json_parsed: # each key contain node info value = json_parsed[key] # Create an OtpNode object given the id and status. # Note the OtpNode object grabs the ip address from the id. node = OtpNode(id=value['otpNode'], status=value['status']) if node.ip == 'cb.local': node.ip = self.ip node.id = node.id.replace('cb.local', self.ip.__str__()) # The ip address grabbed from the id is '127.0.0.1' or '::1' # when the node is not part of a cluster. This can be amended # to the ip address in the TestInputServer object that is # provided. 
if node.ip in ['127.0.0.1', '[::1]']: node.ip = self.ip node.port = int(key[key.rfind(":") + 1:]) node.replication = value['replication'] if 'gracefulFailoverPossible' in list(value.keys()): node.gracefulFailoverPossible = value['gracefulFailoverPossible'] else: node.gracefulFailoverPossible = False nodes.append(node) return nodes def cluster_status(self): parsed = {} api = self.baseUrl + 'pools/default' status, content, header = self._http_request(api) if status: parsed = json.loads(content) return parsed def fetch_vbucket_map(self, bucket="default"): """Return vbucket map for bucket Keyword argument: bucket -- bucket name """ api = self.baseUrl + 'pools/default/buckets/' + bucket status, content, header = self._http_request(api) _stats = json.loads(content) return _stats['vBucketServerMap']['vBucketMap'] def get_vbucket_map_and_server_list(self, bucket="default"): """ Return server list, replica and vbuckets map that matches to server list """ vbucket_map = self.fetch_vbucket_map(bucket) api = self.baseUrl + 'pools/default/buckets/' + bucket status, content, header = self._http_request(api) _stats = json.loads(content) num_replica = _stats['vBucketServerMap']['numReplicas'] vbucket_map = _stats['vBucketServerMap']['vBucketMap'] servers = _stats['vBucketServerMap']['serverList'] server_list = [] for node in servers: node = node.split(":") server_list.append(node[0]) return vbucket_map, server_list, num_replica def get_pools_info(self): parsed = {} api = self.baseUrl + 'pools' status, content, header = self._http_request(api) json_parsed = json.loads(content) if status: parsed = json_parsed return parsed def get_pools_default(self, query='', timeout=30): parsed = {} api = self.baseUrl + 'pools/default' if query: api += "?" 
+ query status, content, header = self._http_request(api, timeout=timeout) json_parsed = json.loads(content) if status: parsed = json_parsed return parsed def get_cluster_stats(self): """ Reads cluster nodes statistics using `pools/default` rest GET method :return stat_dict - Dictionary of CPU & Memory status each cluster node: """ stat_dict = dict() json_output = self.get_pools_default() if 'nodes' in json_output: for node_stat in json_output['nodes']: stat_dict[node_stat['hostname']] = dict() stat_dict[node_stat['hostname']]['services'] = node_stat['services'] stat_dict[node_stat['hostname']]['cpu_utilization'] = node_stat['systemStats']['cpu_utilization_rate'] stat_dict[node_stat['hostname']]['mem_free'] = node_stat['systemStats']['mem_free'] stat_dict[node_stat['hostname']]['mem_total'] = node_stat['systemStats']['mem_total'] stat_dict[node_stat['hostname']]['swap_mem_used'] = node_stat['systemStats']['swap_used'] stat_dict[node_stat['hostname']]['swap_mem_total'] = node_stat['systemStats']['swap_total'] return stat_dict def get_pools(self): version = None api = self.baseUrl + 'pools' status, content, header = self._http_request(api) json_parsed = json.loads(content) if status: version = MembaseServerVersion(json_parsed['implementationVersion'], json_parsed['componentsVersion']) return version def get_buckets(self, num_retries=3, poll_interval=15): buckets = [] api = '{0}{1}'.format(self.baseUrl, 'pools/default/buckets?basic_stats=true') buckets_are_received = False status = "" content = "" while num_retries > 0: try: # get all the buckets status, content, header = self._http_request(api) json_parsed = json.loads(content) if status: for item in json_parsed: bucketInfo = RestParser().parse_get_bucket_json(item) buckets.append(bucketInfo) buckets_are_received = True break else: log.error("Response status is: False, response content is: {0}".format(content)) num_retries -= 1 time.sleep(poll_interval) except Exception as e: num_retries -= 1 log.error(e) 
log.error('{0} seconds sleep before calling get_buckets again...'.format(poll_interval)) time.sleep(poll_interval) if not buckets_are_received: log.error("Could not get buckets list from the following api: {0}".format(api)) log.error("Last response status is: {0}".format(status)) log.error("Last response content is: {0}".format(content)) return buckets def get_bucket_by_name(self,bucket_name): # get all the buckets buckets = [] api = '{0}{1}'.format(self.baseUrl, 'pools/default/buckets?basic_stats=true') status, content, header = self._http_request(api) json_parsed = json.loads(content) if status: for item in json_parsed: bucketInfo = RestParser().parse_get_bucket_json(item) if bucketInfo.name == bucket_name: buckets.append(bucketInfo) return buckets def get_buckets_itemCount(self): # get all the buckets bucket_map = {} api = '{0}{1}'.format(self.baseUrl, 'pools/default/buckets?basic_stats=true') status, content, header = self._http_request(api) json_parsed = json.loads(content) if status: for item in json_parsed: bucketInfo = RestParser().parse_get_bucket_json(item) bucket_map[bucketInfo.name] = bucketInfo.stats.itemCount return bucket_map def get_bucket_stats_for_node(self, bucket='default', node=None): if not node: log.error('node_ip not specified') return None stats = {} api = "{0}{1}{2}{3}{4}:{5}{6}".format(self.baseUrl, 'pools/default/buckets/', bucket, "/nodes/", node.ip, node.port, "/stats") status, content, header = self._http_request(api) if status: json_parsed = json.loads(content) op = json_parsed["op"] samples = op["samples"] for stat_name in samples: if stat_name not in stats: if len(samples[stat_name]) == 0: stats[stat_name] = [] else: stats[stat_name] = samples[stat_name][-1] else: raise Exception("Duplicate entry in the stats command {0}".format(stat_name)) return stats def get_node_settings(self, setting_name=None): api = "{0}{1}".format(self.fts_baseUrl, 'api/manager') status, content, header = self._http_request(api) json_parsed = 
json.loads(content) options_vals = json_parsed['mgr']['options'] if setting_name in options_vals.keys(): return options_vals[setting_name] log.error("Setting {0} not available".format(setting_name)) def get_bucket_status(self, bucket): if not bucket: log.error("Bucket Name not Specified") return None api = self.baseUrl + 'pools/default/buckets' status, content, header = self._http_request(api) if status: json_parsed = json.loads(content) for item in json_parsed: if item["name"] == bucket: return item["nodes"][0]["status"] log.error("Bucket {0} doesn't exist".format(bucket)) return None def fetch_bucket_stats(self, bucket='default', zoom='minute'): """Return deserialized buckets stats. Keyword argument: bucket -- bucket name zoom -- stats zoom level (minute | hour | day | week | month | year) """ api = self.baseUrl + 'pools/default/buckets/{0}/stats?zoom={1}'.format(bucket, zoom) log.info(api) status, content, header = self._http_request(api) return json.loads(content) def set_query_index_api_mode(self, index_api_mode=3): api = self.query_baseUrl + 'admin/settings' query_api_setting = {"max-index-api": index_api_mode} status, content, header = self._http_request(api, 'POST', json.dumps(query_api_setting)) if not status: raise Exception(content) log.info("{0} set".format(query_api_setting)) def fetch_bucket_xdcr_stats(self, bucket='default', zoom='minute'): """Return deserialized bucket xdcr stats. 
Keyword argument: bucket -- bucket name zoom -- stats zoom level (minute | hour | day | week | month | year) """ api = self.baseUrl + 'pools/default/buckets/@xdcr-{0}/stats?zoom={1}'.format(bucket, zoom) status, content, header = self._http_request(api) return json.loads(content) def fetch_system_stats(self): """Return deserialized system stats.""" api = self.baseUrl + 'pools/default/' status, content, header = self._http_request(api) return json.loads(content) def get_xdc_queue_size(self, bucket): """Fetch bucket stats and return the latest value of XDC replication queue size""" bucket_stats = self.fetch_bucket_xdcr_stats(bucket) return bucket_stats['op']['samples']['replication_changes_left'][-1] def get_dcp_queue_size(self, bucket): """Fetch bucket stats and return the latest value of DCP queue size""" bucket_stats = self.fetch_bucket_stats(bucket) return bucket_stats['op']['samples']['ep_dcp_xdcr_items_remaining'][-1] def get_active_key_count(self, bucket): """Fetch bucket stats and return the bucket's curr_items count""" bucket_stats = self.fetch_bucket_stats(bucket) ret_val = -1 retries = 10 while retries > 0: try: ret_val = bucket_stats['op']['samples']['curr_items'][-1] return ret_val except KeyError as err: log.error(f"get_active_key_count() function for bucket {bucket} reported an error {err}") log.error(f"Corresponding bucket stats JSON is {bucket_stats}") time.sleep(2) retries = retries - 1 return ret_val def get_replica_key_count(self, bucket): """Fetch bucket stats and return the bucket's replica count""" bucket_stats = self.fetch_bucket_stats(bucket) return bucket_stats['op']['samples']['vb_replica_curr_items'][-1] def get_nodes(self, get_all_nodes=False): nodes = [] api = self.baseUrl + 'pools/default' status, content, header = self._http_request(api) count = 0 while not content and count < 7: log.info("sleep 5 seconds and retry") time.sleep(5) status, content, header = self._http_request(api) count += 1 if count == 7: raise Exception("could not get 
node info after 30 seconds") json_parsed = json.loads(content) if status: if "nodes" in json_parsed: for json_node in json_parsed["nodes"]: node = RestParser().parse_get_nodes_response(json_node) node.rest_username = self.username node.rest_password = self.password if node.ip == "127.0.0.1": node.ip = self.ip # Only add nodes which are active on cluster if get_all_nodes or node.clusterMembership == 'active': nodes.append(node) else: log.info("Node {0} not part of cluster {1}".format(node.ip, node.clusterMembership)) return nodes # this method returns the number of node in cluster def get_cluster_size(self): nodes = self.get_nodes() node_ip = [] for node in nodes: node_ip.append(node.ip) log.info("Number of node(s) in cluster is {0} node(s)".format(len(node_ip))) return len(node_ip) """ this medthod return version on node that is not initialized yet """ def get_nodes_version(self): node = self.get_nodes_self() version = node.version log.info("Node version in cluster {0}".format(version)) return version # this method returns the versions of nodes in cluster def get_nodes_versions(self, logging=True): nodes = self.get_nodes() versions = [] for node in nodes: versions.append(node.version) if logging: log.info("Node versions in cluster {0}".format(versions)) return versions def get_major_version(self): """ Returns the major version of the node (e.g. 6.5) """ return self.get_nodes_self().major_version def check_cluster_compatibility(self, version): """ Check if all nodes in cluster are of versions equal or above the version required. :param version: Version to check the cluster compatibility for. Should be of format major_ver.minor_ver. For example: 5.0, 4.5, 5.1 :return: True if cluster is compatible with the version specified, False otherwise. Return None if cluster is uninitialized. """ nodes = self.get_nodes() if not nodes: # If nodes returned is None, it means that the cluster is not initialized yet and hence cluster # compatibility cannot be found. 
Return None return None major_ver, minor_ver = version.split(".") compatibility = int(major_ver) * 65536 + int(minor_ver) is_compatible = True for node in nodes: clusterCompatibility = int(node.clusterCompatibility) if clusterCompatibility < compatibility: is_compatible = False return is_compatible # this method returns the services of nodes in cluster - implemented for Sherlock def get_nodes_services(self): nodes = self.get_nodes() map = {} for node in nodes: key = "{0}:{1}".format(node.ip, node.port) map[key] = node.services return map # Check node version def check_node_versions(self, check_version="4.0"): versions = self.get_nodes_versions() if versions[0] < check_version: return False return True def get_bucket_stats(self, bucket='default'): stats = {} status, json_parsed = self.get_bucket_stats_json(bucket) if status: op = json_parsed["op"] samples = op["samples"] for stat_name in samples: if samples[stat_name]: last_sample = len(samples[stat_name]) - 1 if last_sample: stats[stat_name] = samples[stat_name][last_sample] return stats def get_fts_stats(self, index_name=None, bucket_name=None, stat_name=None): """ List of fts stats available as of 03/16/2017 - default:default_idx3:avg_queries_latency: 0, default:default_idx3:batch_merge_count: 0, default:default_idx3:doc_count: 0, default:default_idx3:iterator_next_count: 0, default:default_idx3:iterator_seek_count: 0, default:default_idx3:num_bytes_live_data: 0, default:default_idx3:num_bytes_used_disk: 0, default:default_idx3:num_mutations_to_index: 0, default:default_idx3:num_pindexes: 0, default:default_idx3:num_pindexes_actual: 0, default:default_idx3:num_pindexes_target: 0, default:default_idx3:num_recs_to_persist: 0, default:default_idx3:reader_get_count: 0, default:default_idx3:reader_multi_get_count: 0, default:default_idx3:reader_prefix_iterator_count: 0, default:default_idx3:reader_range_iterator_count: 0, default:default_idx3:timer_batch_store_count: 0, default:default_idx3:timer_data_delete_count: 0, 
default:default_idx3:timer_data_update_count: 0, default:default_idx3:timer_opaque_get_count: 0, default:default_idx3:timer_opaque_set_count: 0, default:default_idx3:timer_rollback_count: 0, default:default_idx3:timer_snapshot_start_count: 0, default:default_idx3:total_bytes_indexed: 0, default:default_idx3:total_bytes_query_results: 0, default:default_idx3:total_compactions: 0, default:default_idx3:total_queries: 0, default:default_idx3:total_queries_error: 0, default:default_idx3:total_queries_slow: 0, default:default_idx3:total_queries_timeout: 0, default:default_idx3:total_request_time: 0, default:default_idx3:total_term_searchers: 0, default:default_idx3:writer_execute_batch_count: 0, :param index_name: name of the index :param bucket_name: source bucket :param stat_name: any of the above :return: """ api = "{0}{1}".format(self.fts_baseUrl, 'api/nsstats') attempts = 0 while attempts < 5: status, content, header = self._http_request(api) json_parsed = json.loads(content) if bucket_name is None and index_name is None and stat_name is None: return status, content if bucket_name is None and index_name is None: key = stat_name else: key = bucket_name+':'+index_name+':'+stat_name if key in json_parsed: return status, json_parsed[key] attempts += 1 log.info("Stat {0} not available yet".format(stat_name)) time.sleep(1) log.error("ERROR: Stat {0} error on {1} on bucket {2}". 
format(stat_name, index_name, bucket_name)) def start_fts_index_compaction(self, index_name): api = "{0}{1}".format(self.fts_baseUrl, f'api/index/{index_name}/tasks') params = {"op": "merge"} status, content, header = self._http_request(api, method='POST', params=json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers(), timeout=30) json_parsed = json.loads(content) return status, json_parsed def get_fts_index_compactions(self, index_name): api = "{0}{1}".format(self.fts_baseUrl, f'api/index/{index_name}/tasks') params = {"op": "get"} status, content, header = self._http_request(api, method='POST', params=json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers(), timeout=30) json_parsed = json.loads(content) return status, json_parsed def cancel_fts_index_compaction(self, index_name=None, uuid=None): api = "{0}{1}".format(self.fts_baseUrl, f'api/index/{index_name}/tasks') params = {"op": "cancel", "uuid": uuid} status, content, header = self._http_request(api, method='POST', params=json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers(), timeout=30) json_parsed = json.loads(content) return status, json_parsed def get_bucket_stats_json(self, bucket='default'): stats = {} api = "{0}{1}{2}{3}".format(self.baseUrl, 'pools/default/buckets/', bucket, "/stats") if isinstance(bucket, Bucket): api = '{0}{1}{2}{3}'.format(self.baseUrl, 'pools/default/buckets/', bucket.name, "/stats") status, content, header = self._http_request(api) json_parsed = json.loads(content) return status, json_parsed def get_bucket_json(self, bucket='default'): api = '{0}{1}{2}'.format(self.baseUrl, 'pools/default/buckets/', bucket) if isinstance(bucket, Bucket): api = '{0}{1}{2}'.format(self.baseUrl, 'pools/default/buckets/', bucket.name) status, content, header = self._http_request(api) if not status: raise GetBucketInfoFailed(bucket, content) return json.loads(content) def get_bucket_maxTTL(self, bucket='default'): bucket_info = 
self.get_bucket_json(bucket=bucket) return bucket_info['maxTTL'] def get_bucket_compressionMode(self, bucket='default'): bucket_info = self.get_bucket_json(bucket=bucket) info = self.get_nodes_self() if 5.5 > float(info.version[:3]): bucket_info['compressionMode'] = "off" return bucket_info['compressionMode'] def is_lww_enabled(self, bucket='default'): bucket_info = self.get_bucket_json(bucket=bucket) try: if bucket_info['conflictResolutionType'] == 'lww': return True except KeyError: return False def get_bucket(self, bucket='default', num_attempt=1, timeout=1): bucketInfo = None try: bucket = bucket.decode() except AttributeError: pass api = '%s%s%s?basic_stats=true' % (self.baseUrl, 'pools/default/buckets/', bucket) if isinstance(bucket, Bucket): api = '%s%s%s?basic_stats=true' % (self.baseUrl, 'pools/default/buckets/', bucket.name) status, content, header = self._http_request(api) num = 1 while not status and num_attempt > num: log.error("try to get {0} again after {1} sec".format(api, timeout)) time.sleep(timeout) status, content, header = self._http_request(api) num += 1 if status: bucketInfo = RestParser().parse_get_bucket_response(content) return bucketInfo def get_vbuckets(self, bucket='default'): b = self.get_bucket(bucket) return None if not b else b.vbuckets def delete_bucket(self, bucket='default', num_retries=3, poll_interval=5): api = '%s%s%s' % (self.baseUrl, 'pools/default/buckets/', bucket) if isinstance(bucket, Bucket): api = '%s%s%s' % (self.baseUrl, 'pools/default/buckets/', bucket.name) status = False while num_retries > 0: try: status, content, header = self._http_request(api, 'DELETE') if int(header['status']) == 500: # According to http://docs.couchbase.com/couchbase-manual-2.5/cb-rest-api/#deleting-buckets # the cluster will return with 500 if it failed to nuke # the bucket on all of the nodes within 30 secs log.warning("Bucket deletion timed out waiting for all nodes, retrying...") num_retries -= 1 time.sleep(poll_interval) else: break 
except Exception as e: num_retries -= 1 log.error(e) log.error('{0} seconds sleep before calling delete_bucket again...'.format(poll_interval)) time.sleep(poll_interval) return status def delete_all_buckets(self): buckets = self.get_buckets() for bucket in buckets: if isinstance(bucket, Bucket): api = '%s%s%s' % (self.baseUrl, 'pools/default/buckets/', bucket.name) self._http_request(api, 'DELETE') '''Load any of the three sample buckets''' def load_sample(self, sample_name, poll_interval=3, max_wait_time=1200, max_error_retries=3): api = '{0}{1}'.format(self.baseUrl, "sampleBuckets/install") data = '["{0}"]'.format(sample_name) status, content, header = self._http_request(api, 'POST', data) # Allow the sample bucket to be loaded self.wait_until_bucket_loaded(sample_name, poll_interval, max_wait_time, max_error_retries) return status def wait_until_bucket_loaded(self, bucket_name, poll_interval=3, max_wait_time=1200, max_error_retries=3): max_time = time.time() + float(max_wait_time) is_bucket_loaded = False response = "" api = '{0}{1}'.format(self.baseUrl, "pools/default/buckets/{}".format(bucket_name)) previous_doc_count = 0 while time.time() < max_time and max_error_retries > 0: time.sleep(poll_interval) status, content, response = self._http_request(api, method='GET') data = json.loads(content) current_doc_count = int(data["basicStats"]["itemCount"]) if status: if current_doc_count == previous_doc_count: is_bucket_loaded = True break else: previous_doc_count = current_doc_count else: max_error_retries -= 1 log.warning("Something wrong happened while getting bucket {0} items count, retrying.".format(bucket_name)) log.warning("Server response is {0}".format(str(response))) if not is_bucket_loaded: log.error("Bucket {0} was not loaded completely") log.error("Last response is: {0}".format(str(response))) # figure out the proxy port def create_bucket(self, bucket='', ramQuotaMB=1, replicaNumber=1, proxyPort=11211, bucketType='membase', replica_index=1, 
threadsNumber=3, flushEnabled=1, evictionPolicy='valueOnly', lww=False, maxTTL=None, compressionMode='passive', storageBackend='couchstore'): api = '{0}{1}'.format(self.baseUrl, 'pools/default/buckets') params = urllib.parse.urlencode({}) init_params = {'name': bucket, 'ramQuotaMB': ramQuotaMB, 'replicaNumber': replicaNumber, # 'proxyPort': proxyPort, 'bucketType': bucketType, 'replicaIndex': replica_index, 'threadsNumber': threadsNumber, 'flushEnabled': flushEnabled, 'evictionPolicy': evictionPolicy} if bucketType == "memcached": log.info("Create memcached bucket") # 'replicaNumber' is not valid for memcached buckets init_params.pop("replicaNumber", None) if lww: init_params['conflictResolutionType'] = 'lww' if maxTTL: init_params['maxTTL'] = maxTTL if compressionMode and self.is_enterprise_edition(): init_params['compressionMode'] = compressionMode if bucketType == 'ephemeral': del init_params['replicaIndex'] # does not apply to ephemeral buckets, and is even rejected # bucket storage is applicable only for membase bucket if bucketType == "membase": init_params['storageBackend'] = storageBackend pre_spock = not self.check_cluster_compatibility("5.0") if pre_spock: init_params['proxyPort'] = proxyPort params = urllib.parse.urlencode(init_params) log.info("{0} with param: {1}".format(api, params)) create_start_time = time.time() maxwait = 60 for numsleep in range(maxwait): status, content, header = self._http_request(api, 'POST', params) if status: break elif (int(header['status']) == 503 and '{"_":"Bucket with given name still exists"}'.encode('utf-8') in content): log.info("The bucket still exists, sleep 1 sec and retry") time.sleep(1) else: raise BucketCreationException(ip=self.ip, bucket_name=bucket) if (numsleep + 1) == maxwait: log.error("Tried to create the bucket for {0} secs.. giving up". 
format(maxwait)) raise BucketCreationException(ip=self.ip, bucket_name=bucket) create_time = time.time() - create_start_time log.info("{0:.02f} seconds to create bucket {1}". format(round(create_time, 2), bucket)) return status def change_bucket_props(self, bucket, ramQuotaMB=None, replicaNumber=None, proxyPort=None, replicaIndex=None, flushEnabled=None, timeSynchronization=None, maxTTL=None, compressionMode=None): api = '{0}{1}{2}'.format(self.baseUrl, 'pools/default/buckets/', bucket) if isinstance(bucket, Bucket): api = '{0}{1}{2}'.format(self.baseUrl, 'pools/default/buckets/', bucket.name) params = urllib.parse.urlencode({}) params_dict = {} existing_bucket = self.get_bucket_json(bucket) if ramQuotaMB: params_dict["ramQuotaMB"] = ramQuotaMB if replicaNumber: params_dict["replicaNumber"] = replicaNumber #if proxyPort: # params_dict["proxyPort"] = proxyPort if replicaIndex: params_dict["replicaIndex"] = replicaIndex if flushEnabled: params_dict["flushEnabled"] = flushEnabled if timeSynchronization: params_dict["timeSynchronization"] = timeSynchronization if maxTTL: params_dict["maxTTL"] = maxTTL if compressionMode and self.is_enterprise_edition(): params_dict["compressionMode"] = compressionMode params = urllib.parse.urlencode(params_dict) log.info("%s with param: %s" % (api, params)) status, content, header = self._http_request(api, 'POST', params) if timeSynchronization: if status: raise Exception("Erroneously able to set bucket settings %s for bucket on time-sync" % (params, bucket)) return status, content if not status: raise Exception("Unable to set bucket settings %s for bucket" % (params, bucket)) log.info("bucket %s updated" % bucket) return status # return AutoFailoverSettings def get_autofailover_settings(self): settings = None api = self.baseUrl + 'settings/autoFailover' status, content, header = self._http_request(api) json_parsed = json.loads(content) if status: settings = AutoFailoverSettings() settings.enabled = json_parsed["enabled"] 
settings.count = json_parsed["count"] settings.timeout = json_parsed["timeout"] settings.failoverOnDataDiskIssuesEnabled = json_parsed["failoverOnDataDiskIssues"]["enabled"] settings.failoverOnDataDiskIssuesTimeout = json_parsed["failoverOnDataDiskIssues"]["timePeriod"] settings.maxCount = json_parsed["maxCount"] settings.failoverServerGroup = json_parsed["failoverServerGroup"] if json_parsed["canAbortRebalance"]: settings.can_abort_rebalance = json_parsed["canAbortRebalance"] return settings def update_autofailover_settings(self, enabled, timeout, canAbortRebalance=False, enable_disk_failure=False, disk_timeout=120, maxCount=1, enableServerGroup=False): params_dict = {} params_dict['timeout'] = timeout if enabled: params_dict['enabled'] = 'true' else: params_dict['enabled'] = 'false' if canAbortRebalance: params_dict['canAbortRebalance'] = 'true' if enable_disk_failure: params_dict['failoverOnDataDiskIssues[enabled]'] = 'true' params_dict['failoverOnDataDiskIssues[timePeriod]'] = disk_timeout else: params_dict['failoverOnDataDiskIssues[enabled]'] = 'false' params_dict['maxCount'] = maxCount if enableServerGroup: params_dict['failoverServerGroup'] = 'true' else: params_dict['failoverServerGroup'] = 'false' params = urllib.parse.urlencode(params_dict) api = self.baseUrl + 'settings/autoFailover' log.info('settings/autoFailover params : {0}'.format(params)) status, content, header = self._http_request(api, 'POST', params) if not status: log.warning('''failed to change autofailover_settings! See MB-7282. Workaround: wget --user=Administrator --password=asdasd --post-data='rpc:call(mb_master:master_node(), erlang, apply ,[fun () -> erlang:exit(erlang:whereis(mb_master), kill) end, []]).' 
http://localhost:8091/diag/eval''') return status # return AutoReprovisionSettings def get_autoreprovision_settings(self): settings = None api = self.baseUrl + 'settings/autoReprovision' status, content, header = self._http_request(api) json_parsed = json.loads(content) if status: settings = AutoReprovisionSettings() settings.enabled = json_parsed["enabled"] settings.count = json_parsed["count"] settings.max_nodes = json_parsed["max_nodes"] return settings def update_autoreprovision_settings(self, enabled, maxNodes=1): if enabled: params = urllib.parse.urlencode({'enabled': 'true', 'maxNodes': maxNodes}) else: params = urllib.parse.urlencode({'enabled': 'false', 'maxNodes': maxNodes}) api = self.baseUrl + 'settings/autoReprovision' log.info('settings/autoReprovision params : {0}'.format(params)) status, content, header = self._http_request(api, 'POST', params) if not status: log.error('failed to change autoReprovision_settings!') return status def reset_autofailover(self): api = self.baseUrl + 'settings/autoFailover/resetCount' status, content, header = self._http_request(api, 'POST', '') return status def reset_autoreprovision(self): api = self.baseUrl + 'settings/autoReprovision/resetCount' status, content, header = self._http_request(api, 'POST', '') return status def set_alerts_settings(self, recipients, sender, email_username, email_password, email_host='localhost', email_port=25, email_encrypt='false', alerts='auto_failover_node,auto_failover_maximum_reached'): api = self.baseUrl + 'settings/alerts' params = urllib.parse.urlencode({'enabled': 'true', 'recipients': recipients, 'sender': sender, 'emailUser': email_username, 'emailPass': email_password, 'emailHost': email_host, 'emailPort': email_port, 'emailEncrypt': email_encrypt, 'alerts': alerts}) log.info('settings/alerts params : {0}'.format(params)) status, content, header = self._http_request(api, 'POST', params) return status def get_alerts_settings(self): api = self.baseUrl + 'settings/alerts' status, 
content, header = self._http_request(api) json_parsed = json.loads(content) if not status: raise Exception("unable to get autofailover alerts settings") return json_parsed def disable_alerts(self): api = self.baseUrl + 'settings/alerts' params = urllib.parse.urlencode({'enabled': 'false'}) log.info('settings/alerts params : {0}'.format(params)) status, content, header = self._http_request(api, 'POST', params) return status def set_cas_drift_threshold(self, bucket, ahead_threshold_in_millisecond, behind_threshold_in_millisecond): api = self.baseUrl + 'pools/default/buckets/{0}'. format( bucket ) params_dict ={'driftAheadThresholdMs': ahead_threshold_in_millisecond, 'driftBehindThresholdMs': behind_threshold_in_millisecond} params = urllib.parse.urlencode(params_dict) log.info("%s with param: %s" % (api, params)) status, content, header = self._http_request(api, 'POST', params) return status def stop_rebalance(self, wait_timeout=10): api = self.baseUrl + '/controller/stopRebalance' status, content, header = self._http_request(api, 'POST') if status: for i in range(int(wait_timeout)): if self._rebalance_progress_status() == 'running': log.warning("rebalance is not stopped yet after {0} sec".format(i + 1)) time.sleep(1) status = False else: log.info("rebalance was stopped") status = True break else: log.error("Rebalance is not stopped due to {0}".format(content)) return status def set_data_path(self, data_path=None, index_path=None, cbas_path=None): end_point = '/nodes/self/controller/settings' api = self.baseUrl + end_point paths = HTTPHeaderDict() set_path = False if data_path: set_path = True paths.add('path', data_path) if index_path: set_path = True paths.add('index_path', index_path) if cbas_path: set_path = True import ast for cbas in ast.literal_eval(cbas_path): paths.add('cbas_path', cbas) if set_path: params = urllib.parse.urlencode(paths) log.info('%s : %s' % (end_point, params)) status, content, header = self._http_request(api, 'POST', params) if status: 
log.info("Setting data_path: {0}: status {1}".format(data_path, status)) else: log.error("Unable to set data_path {0} : {1}".format(data_path, content)) return status def get_database_disk_size(self, bucket='default'): api = self.baseUrl + "pools/{0}/buckets".format(bucket) status, content, header = self._http_request(api) json_parsed = json.loads(content) # disk_size in MB disk_size = (json_parsed[0]["basicStats"]["diskUsed"]) // (1024 * 1024) return status, disk_size def ddoc_compaction(self, design_doc_id, bucket="default"): api = self.baseUrl + "pools/default/buckets/%s/ddocs/%s/controller/compactView" % \ (bucket, design_doc_id) status, content, header = self._http_request(api, 'POST') if not status: raise CompactViewFailed(design_doc_id, content) log.info("compaction for ddoc '%s' was triggered" % design_doc_id) def check_compaction_status(self, bucket_name): tasks = self.active_tasks() if "error" in tasks: raise Exception(tasks) for task in tasks: log.info("Task is {0}".format(task)) if task["type"] == "bucket_compaction": if task["bucket"] == bucket_name: return True, task["progress"] return False, None def change_memcached_t_option(self, value): cmd = '[ns_config:update_key({node, N, memcached}, fun (PList)' + \ ' -> lists:keystore(verbosity, 1, PList, {verbosity, \'-t ' + str(value) + '\'}) end)' + \ ' || N <- ns_node_disco:nodes_wanted()].' return self.diag_eval(cmd) def set_ensure_full_commit(self, value): """Dynamic settings changes""" # the boolean paramter is used to turn on/off ensure_full_commit(). In XDCR, # issuing checkpoint in this function is expensive and not necessary in some # test, turning off this function would speed up some test. The default value # is ON. 
cmd = 'ns_config:set(ensure_full_commit_enabled, {0}).'.format(value) return self.diag_eval(cmd) def get_internalSettings(self, param): """allows to get internalSettings values for: indexAwareRebalanceDisabled, rebalanceIndexWaitingDisabled, rebalanceIndexPausingDisabled, maxParallelIndexers, maxParallelReplicaIndexers, maxBucketCount""" api = self.baseUrl + "internalSettings" status, content, header = self._http_request(api) json_parsed = json.loads(content) param = json_parsed[param] return param def set_internalSetting(self, param, value): "Set any internal setting" api = self.baseUrl + "internalSettings" if isinstance(value, bool): value = str(value).lower() params = urllib.parse.urlencode({param : value}) status, content, header = self._http_request(api, "POST", params) log.info('Update internal setting {0}={1}'.format(param, value)) return status def get_replication_for_buckets(self, src_bucket_name, dest_bucket_name): replications = self.get_replications() for replication in replications: if src_bucket_name in replication['source'] and \ replication['target'].endswith(dest_bucket_name): return replication raise XDCRException("Replication with Src bucket: {0} and Target bucket: {1} not found". format(src_bucket_name, dest_bucket_name)) """ By default, these are the global replication settings - { optimisticReplicationThreshold:256, workerBatchSize:500, failureRestartInterval:1, docBatchSizeKb":2048, checkpointInterval":1800, maxConcurrentReps":32} You can override these using set_xdcr_param() """ def set_xdcr_param(self, src_bucket_name, dest_bucket_name, param, value): replication = self.get_replication_for_buckets(src_bucket_name, dest_bucket_name) api = self.baseUrl[:-1] + replication['settingsURI'] value = str(value).lower() params = urllib.parse.urlencode({param: value}) status, content, header = self._http_request(api, "POST", params) if not status: raise XDCRException("Unable to set replication setting {0}={1} on bucket {2} on node {3}". 
format(param, value, src_bucket_name, self.ip)) else: log.info("Updated {0}={1} on bucket '{2}' on {3}".format(param, value, src_bucket_name, self.ip)) def set_xdcr_params(self, src_bucket_name, dest_bucket_name, param_value_map): replication = self.get_replication_for_buckets(src_bucket_name, dest_bucket_name) api = self.baseUrl[:-1] + replication['settingsURI'] params = urllib.parse.urlencode(param_value_map) status, content, header = self._http_request(api, "POST", params) if not status: raise XDCRException("{0} \n Unable to set replication settings {1} on bucket {2} on node {3}". format(content, param_value_map, src_bucket_name, self.ip)) else: log.info("Updated {0} on bucket '{1}' on {2}".format(param_value_map, src_bucket_name, self.ip)) def set_global_xdcr_param(self, param, value): api = self.baseUrl[:-1] + "/settings/replications" value = str(value).lower() params = urllib.parse.urlencode({param: value}) status, _, _ = self._http_request(api, "POST", params) if not status: raise XDCRException("Unable to set replication setting {0}={1} on node {2}". format(param, value, self.ip)) log.info("Updated {0}={1} on {2}".format(param, value, self.ip)) # Gets per-replication setting value def get_xdcr_param(self, src_bucket_name, dest_bucket_name, param): replication = self.get_replication_for_buckets(src_bucket_name, dest_bucket_name) api = self.baseUrl[:-1] + replication['settingsURI'] status, content, _ = self._http_request(api) if not status: raise XDCRException("Unable to get replication setting {0} on bucket {1} on node {2}". 
format(param, src_bucket_name, self.ip)) json_parsed = json.loads(content) # when per-replication settings match global(internal) settings, # the param is not returned by rest API # in such cases, return internalSetting value for the param try: return json_parsed[param] except KeyError: if param == 'pauseRequested': return False else: param = 'xdcr' + param[0].upper() + param[1:] log.info("Trying to fetch xdcr param:{0} from global settings". format(param)) return self.get_internalSettings(param) # Returns a boolean value on whether replication def is_replication_paused(self, src_bucket_name, dest_bucket_name): return self.get_xdcr_param(src_bucket_name, dest_bucket_name, 'pauseRequested') def is_replication_paused_by_id(self, repl_id): repl_id = repl_id.replace('/', '%2F') api = self.baseUrl + 'settings/replications/' + repl_id status, content, header = self._http_request(api) if not status: raise XDCRException("Unable to retrieve pause resume status for replication {0}". format(repl_id)) repl_stats = json.loads(content) return repl_stats['pauseRequested'] def pause_resume_repl_by_id(self, repl_id, param, value): repl_id = repl_id.replace('/', '%2F') api = self.baseUrl + 'settings/replications/' + repl_id params = urllib.parse.urlencode({param: value}) status, _, _ = self._http_request(api, "POST", params) if not status: raise XDCRException("Unable to update {0}={1} setting for replication {2}". format(param, value, repl_id)) log.info("Updated {0}={1} on {2}".format(param, value, repl_id)) def get_recent_xdcr_vb_ckpt(self, repl_id): command = 'ns_server_testrunner_api:grab_all_goxdcr_checkpoints().' 
status, content = self.diag_eval(command, print_log=False) if not status: raise Exception("Unable to get recent XDCR checkpoint information") repl_ckpt_list = json.loads(content) # a single decoding will only return checkpoint record as string # convert string to dict using json chkpt_doc_string = repl_ckpt_list['/ckpt/%s/0' % repl_id].replace('"', '\"') chkpt_dict = json.loads(chkpt_doc_string) return chkpt_dict['checkpoints'][0] def get_repl_stat(self, repl_id, src_bkt="default", stat="data_replicated", timestamp=None): repl_id = repl_id.replace('/', '%2F') api = self.baseUrl + "pools/default/buckets/" + src_bkt + "/stats/replications%2F" \ + repl_id + "%2F" + stat if timestamp: api += "?haveTStamp=" + timestamp status, content, header = self._http_request(api) if not status: raise XDCRException("Unable to retrieve {0} stat for replication {1}". format(stat, repl_id)) repl_stat = json.loads(content) samples = [] for node in self.get_nodes(): items = repl_stat["nodeStats"]["{0}:8091".format(node.ip)] samples.append(items) return samples """ Start of FTS rest apis""" def set_fts_ram_quota(self, value): """set fts ram quota""" api = self.baseUrl + "pools/default" params = urllib.parse.urlencode({"ftsMemoryQuota": value}) status, content, _ = self._http_request(api, "POST", params) if status: log.info("SUCCESS: FTS RAM quota set to {0}mb".format(value)) else: raise Exception("Error setting fts ram quota: {0}".format(content)) return status def set_maxConcurrentPartitionMovesPerNode(self, value): api = self.fts_baseUrl + "api/managerOptions" params = {"maxConcurrentPartitionMovesPerNode": str(value)} status, content, _ = self._http_request(api, "PUT", params=json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers()) if status: log.info("SUCCESS: FTS maxConcurrentPartitionMovesPerNode set to {0}".format(value)) return status def set_disableFileTransferRebalance(self, value): api = self.fts_baseUrl + "api/managerOptions" params = 
{"disableFileTransferRebalance": str(value)} status, content, _ = self._http_request(api, "PUT", params=json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers()) if status: log.info("SUCCESS: FTS disableFileTransferRebalance set to {0}".format(value)) return status def set_maxFeedsPerDCPAgent(self, value): api = self.fts_baseUrl + "api/managerOptions" params = {"maxFeedsPerDCPAgent": str(value)} status, content, _ = self._http_request(api, "PUT", params=json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers()) if status: log.info("SUCCESS: FTS maxFeedsPerDCPAgent set to {0}".format(value)) return status def set_maxDCPAgents(self, value): api = self.fts_baseUrl + "api/managerOptions" params = {"maxDCPAgents": str(value)} status, content, _ = self._http_request(api, "PUT", params=json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers()) if status: log.info("SUCCESS: FTS maxDCPAgents set to {0}".format(value)) return status def create_fts_index(self, index_name, params): """create or edit fts index , returns {"status":"ok"} on success""" api = self.fts_baseUrl + "api/index/{0}".format(index_name) log.info(json.dumps(params)) status, content, header = self._http_request(api, 'PUT', json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers(), timeout=30) if status: log.info("Index {0} created".format(index_name)) else: raise Exception("Error creating index: {0}".format(content)) return status def update_fts_index(self, index_name, index_def): api = self.fts_baseUrl + "api/index/{0}".format(index_name) log.info(json.dumps(index_def, indent=3)) status, content, header = self._http_request(api, 'PUT', json.dumps(index_def, ensure_ascii=False), headers=self._create_capi_headers(), timeout=30) if status: log.info("Index/alias {0} updated".format(index_name)) else: raise Exception("Error updating index: {0}".format(content)) return status def get_fts_index_definition(self, name, timeout=30): """ get fts 
index/alias definition """ json_parsed = {} api = self.fts_baseUrl + "api/index/{0}".format(name) status, content, header = self._http_request( api, headers=self._create_capi_headers(), timeout=timeout) if status: json_parsed = json.loads(content) return status, json_parsed def get_fts_index_doc_count(self, name, timeout=30): """ get number of docs indexed""" json_parsed = {} api = self.fts_baseUrl + "api/index/{0}/count".format(name) status, content, header = self._http_request( api, headers=self._create_capi_headers(), timeout=timeout) if status: json_parsed = json.loads(content) return json_parsed['count'] def get_fts_index_uuid(self, name, timeout=30): """ Returns uuid of index/alias """ json_parsed = {} api = self.fts_baseUrl + "api/index/{0}".format(name) status, content, header = self._http_request( api, headers=self._create_capi_headers(), timeout=timeout) if status: json_parsed = json.loads(content) return json_parsed['indexDef']['uuid'] def get_fts_pindex_stats(self, timeout=30): """ Returns uuid of index/alias """ json_parsed = {} api = self.fts_baseUrl + "api/stats" status, content, header = self._http_request( api, headers=self._create_capi_headers(), timeout=timeout) if status: json_parsed = json.loads(content) return json_parsed['pindexes'] def delete_fts_index(self, name): """ delete fts index/alias """ api = self.fts_baseUrl + "api/index/{0}".format(name) status, content, header = self._http_request( api, 'DELETE', headers=self._create_capi_headers()) return status def delete_fts_index_extended_output(self, name): """ delete fts index/alias """ api = self.fts_baseUrl + "api/index/{0}".format(name) status, content, header = self._http_request( api, 'DELETE', headers=self._create_capi_headers()) return status, content, header def stop_fts_index_update(self, name): """ method to stop fts index from updating""" api = self.fts_baseUrl + "api/index/{0}/ingestControl/pause".format(name) log.info('calling api : {0}'.format(api)) status, content, header = 
self._http_request( api, 'POST', '', headers=self._create_capi_headers()) return status def resume_fts_index_update(self, name): """ method to stop fts index from updating""" api = self.fts_baseUrl + "api/index/{0}/ingestControl/resume".format(name) log.info('calling api : {0}'.format(api)) status, content, header = self._http_request( api, 'POST', '', headers=self._create_capi_headers()) return status def freeze_fts_index_partitions(self, name): """ method to freeze index partitions asignment""" api = self.fts_baseUrl+ "api/index/{0}/planFreezeControl/freeze".format(name) log.info('calling api : {0}'.format(api)) status, content, header = self._http_request( api, 'POST', '', headers=self._create_capi_headers()) return status def set_bleve_max_result_window(self, bmrw_value): """create or edit fts index , returns {"status":"ok"} on success""" api = self.fts_baseUrl + "api/managerOptions" params = {"bleveMaxResultWindow": str(bmrw_value)} log.info(json.dumps(params)) status, content, header = self._http_request(api, 'PUT', json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers(), timeout=30) if status: log.info("Updated bleveMaxResultWindow") else: raise Exception("Error Updating bleveMaxResultWindow: {0}".format(content)) return status def set_node_setting(self, setting_name, value): """create or edit fts index , returns {"status":"ok"} on success""" api = self.fts_baseUrl + "api/managerOptions" params = {str(setting_name): str(value)} log.info(json.dumps(params)) status, content, header = self._http_request(api, 'PUT', json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers(), timeout=30) if status: log.info("Updated {0}".format(setting_name)) else: raise Exception("Error Updating {0}: {1}".format(setting_name, content)) return status def unfreeze_fts_index_partitions(self, name): """ method to freeze index partitions asignment""" api = self.fts_baseUrl+ "api/index/{0}/planFreezeControl/unfreeze".format(name) log.info('calling 
api : {0}'.format(api)) status, content, header = self._http_request( api, 'POST', '', headers=self._create_capi_headers()) return status def disable_querying_on_fts_index(self, name): """ method to disable querying on index""" api = self.fts_baseUrl + "api/index/{0}/queryControl/disallow".format(name) log.info('calling api : {0}'.format(api)) status, content, header = self._http_request( api, 'POST', '', headers=self._create_capi_headers()) return status def enable_querying_on_fts_index(self, name): """ method to enable querying on index""" api = self.fts_baseUrl + "api/index/{0}/queryControl/allow".format(name) log.info('calling api : {0}'.format(api)) status, content, header = self._http_request( api, 'POST', '', headers=self._create_capi_headers()) return status def run_fts_query(self, index_name, query_json, timeout=70): """Method run an FTS query through rest api""" api = self.fts_baseUrl + "api/index/{0}/query".format(index_name) headers = self._create_capi_headers() status, content, header = self._http_request( api, "POST", json.dumps(query_json, ensure_ascii=False).encode('utf8'), headers, timeout=timeout) content = json.loads(content) if status: return content['total_hits'], content['hits'], content['took'], \ content['status'] else: return -1, content['error'], -1, content['status'] def run_fts_query_generalized(self, index_name, query_json, timeout=70): """Method run an FTS query through rest api""" api = self.fts_baseUrl + "api/index/{0}/query".format(index_name) headers = self._create_capi_headers() status, content, header = self._http_request( api, "POST", json.dumps(query_json, ensure_ascii=False).encode('utf8'), headers, timeout=timeout) content = json.loads(content) return content def run_fts_query_with_facets(self, index_name, query_json): """Method run an FTS query through rest api""" api = self.fts_baseUrl + "api/index/{0}/query".format(index_name) headers = self._create_capi_headers() status, content, header = self._http_request( api, "POST", 
json.dumps(query_json, ensure_ascii=False).encode('utf8'), headers, timeout=70) if status: content = json.loads(content) return content['total_hits'], content['hits'], content['took'], \ content['status'], content['facets'] """ End of FTS rest APIs """ def set_reb_cons_view(self, disable): """Enable/disable consistent view for rebalance tasks""" api = self.baseUrl + "internalSettings" params = {"indexAwareRebalanceDisabled": str(disable).lower()} params = urllib.parse.urlencode(params) status, content, header = self._http_request(api, "POST", params) log.info('Consistent-views during rebalance was set as indexAwareRebalanceDisabled={0}'\ .format(str(disable).lower())) return status def set_reb_index_waiting(self, disable): """Enable/disable rebalance index waiting""" api = self.baseUrl + "internalSettings" params = {"rebalanceIndexWaitingDisabled": str(disable).lower()} params = urllib.parse.urlencode(params) status, content, header = self._http_request(api, "POST", params) log.info('rebalance index waiting was set as rebalanceIndexWaitingDisabled={0}'\ .format(str(disable).lower())) return status def set_rebalance_index_pausing(self, disable): """Enable/disable index pausing during rebalance""" api = self.baseUrl + "internalSettings" params = {"rebalanceIndexPausingDisabled": str(disable).lower()} params = urllib.parse.urlencode(params) status, content, header = self._http_request(api, "POST", params) log.info('index pausing during rebalance was set as rebalanceIndexPausingDisabled={0}'\ .format(str(disable).lower())) return status def set_max_parallel_indexers(self, count): """set max parallel indexer threads""" api = self.baseUrl + "internalSettings" params = {"maxParallelIndexers": count} params = urllib.parse.urlencode(params) status, content, header = self._http_request(api, "POST", params) log.info('max parallel indexer threads was set as maxParallelIndexers={0}'.\ format(count)) return status def set_max_parallel_replica_indexers(self, count): """set max 
parallel replica indexers threads""" api = self.baseUrl + "internalSettings" params = {"maxParallelReplicaIndexers": count} params = urllib.parse.urlencode(params) status, content, header = self._http_request(api, "POST", params) log.info('max parallel replica indexers threads was set as maxParallelReplicaIndexers={0}'.\ format(count)) return status def get_internal_replication_type(self): buckets = self.get_buckets() cmd = "\'{ok, BC} = ns_bucket:get_bucket(%s), ns_bucket:replication_type(BC).\'" % buckets[0].name return self.diag_eval(cmd) def set_mc_threads(self, mc_threads=4): """ Change number of memcached threads and restart the cluster """ cmd = "[ns_config:update_key({node, N, memcached}, " \ "fun (PList) -> lists:keystore(verbosity, 1, PList," \ " {verbosity, \"-t %s\"}) end) " \ "|| N <- ns_node_disco:nodes_wanted()]." % mc_threads return self.diag_eval(cmd) def get_auto_compaction_settings(self): api = self.baseUrl + "settings/autoCompaction" status, content, header = self._http_request(api) return json.loads(content) def set_auto_compaction(self, parallelDBAndVC="false", dbFragmentThreshold=None, viewFragmntThreshold=None, dbFragmentThresholdPercentage=None, viewFragmntThresholdPercentage=None, allowedTimePeriodFromHour=None, allowedTimePeriodFromMin=None, allowedTimePeriodToHour=None, allowedTimePeriodToMin=None, allowedTimePeriodAbort=None, bucket=None): """Reset compaction values to default, try with old fields (dp4 build) and then try with newer fields""" params = {} api = self.baseUrl if bucket is None: # setting is cluster wide api = api + "controller/setAutoCompaction" else: # overriding per/bucket compaction setting api = api + "pools/default/buckets/" + bucket params["autoCompactionDefined"] = "true" # reuse current ram quota in mb per node num_nodes = len(self.node_statuses()) bucket_info = self.get_bucket_json(bucket) quota = self.get_bucket_json(bucket)["quota"]["ram"] // (1048576 * num_nodes) params["ramQuotaMB"] = quota 
params["parallelDBAndViewCompaction"] = parallelDBAndVC # Need to verify None because the value could be = 0 if dbFragmentThreshold is not None: params["databaseFragmentationThreshold[size]"] = dbFragmentThreshold if viewFragmntThreshold is not None: params["viewFragmentationThreshold[size]"] = viewFragmntThreshold if dbFragmentThresholdPercentage is not None: params["databaseFragmentationThreshold[percentage]"] = dbFragmentThresholdPercentage if viewFragmntThresholdPercentage is not None: params["viewFragmentationThreshold[percentage]"] = viewFragmntThresholdPercentage if allowedTimePeriodFromHour is not None: params["allowedTimePeriod[fromHour]"] = allowedTimePeriodFromHour if allowedTimePeriodFromMin is not None: params["allowedTimePeriod[fromMinute]"] = allowedTimePeriodFromMin if allowedTimePeriodToHour is not None: params["allowedTimePeriod[toHour]"] = allowedTimePeriodToHour if allowedTimePeriodToMin is not None: params["allowedTimePeriod[toMinute]"] = allowedTimePeriodToMin if allowedTimePeriodAbort is not None: params["allowedTimePeriod[abortOutside]"] = allowedTimePeriodAbort params = urllib.parse.urlencode(params) log.info("'%s' bucket's settings will be changed with parameters: %s" % (bucket, params)) return self._http_request(api, "POST", params) def disable_auto_compaction(self): """ Cluster-wide Setting Disable autocompaction on doc and view """ api = self.baseUrl + "controller/setAutoCompaction" log.info("Disable autocompaction in cluster-wide setting") status, content, header = self._http_request(api, "POST", "parallelDBAndViewCompaction=false") return status def set_purge_interval_and_parallel_compaction(self, interval=3, parallel="false"): """ Cluster-wide setting. 
Set purge interval Set parallel db and view compaction Return: status """ api = self.baseUrl + "controller/setAutoCompaction" log.info("Set purgeInterval to %s and parallel DB and view compaction to %s"\ % (interval, parallel)) params = {} params["purgeInterval"] = interval params["parallelDBAndViewCompaction"] = parallel params = urllib.parse.urlencode(params) status, content, header = self._http_request(api, "POST", params) return status, content def set_indexer_compaction(self, mode="circular", indexDayOfWeek=None, indexFromHour=0, indexFromMinute=0, abortOutside=False, indexToHour=0, indexToMinute=0, fragmentation=30): """Reset compaction values to default, try with old fields (dp4 build) and then try with newer fields""" params = {} api = self.baseUrl + "controller/setAutoCompaction" params["indexCompactionMode"] = mode params["indexCircularCompaction[interval][fromHour]"] = indexFromHour params["indexCircularCompaction[interval][fromMinute]"] = indexFromMinute params["indexCircularCompaction[interval][toHour]"] = indexToHour params["indexCircularCompaction[interval][toMinute]"] = indexToMinute if indexDayOfWeek: params["indexCircularCompaction[daysOfWeek]"] = indexDayOfWeek params["indexCircularCompaction[interval][abortOutside]"] = str(abortOutside).lower() params["parallelDBAndViewCompaction"] = "false" if mode == "full": params["indexFragmentationThreshold[percentage]"] = fragmentation log.info("Indexer Compaction Settings: %s" % (params)) params = urllib.parse.urlencode(params) return self._http_request(api, "POST", params) def set_global_loglevel(self, loglevel='error'): """Set cluster-wide logging level for core components Possible loglevel: -- debug -- info -- warn -- error """ api = self.baseUrl + 'diag/eval' request_body = 'rpc:eval_everywhere(erlang, apply, [fun () -> \ [ale:set_loglevel(L, {0}) || L <- \ [ns_server, couchdb, user, menelaus, ns_doctor, stats, \ rebalance, cluster, views, stderr]] end, []]).'.format(loglevel) return 
self._http_request(api=api, method='POST', params=request_body, headers=self._create_headers()) def set_indexer_params(self, parameter, val): """ :Possible parameters: -- indexerThreads -- memorySnapshotInterval -- stableSnapshotInterval -- maxRollbackPoints -- logLevel """ params = {} api = self.baseUrl + 'settings/indexes' params[parameter] = val params = urllib.parse.urlencode(params) status, content, header = self._http_request(api, "POST", params) log.info('Indexer {0} set to {1}'.format(parameter, val)) return status def get_global_index_settings(self): api = self.baseUrl + "settings/indexes" status, content, header = self._http_request(api) if status: return json.loads(content) return None def set_couchdb_option(self, section, option, value): """Dynamic settings changes""" cmd = 'ns_config:set({{couchdb, {{{0}, {1}}}}}, {2}).'.format(section, option, value) return self.diag_eval(cmd) def get_alerts(self): api = self.baseUrl + "pools/default/" status, content, header = self._http_request(api) json_parsed = json.loads(content) if status: if "alerts" in json_parsed: return json_parsed['alerts'] else: return None def get_nodes_data_from_cluster(self, param="nodes"): api = self.baseUrl + "pools/default/" status, content, header = self._http_request(api) json_parsed = json.loads(content) if status: if param in json_parsed: return json_parsed[param] else: return None def flush_bucket(self, bucket="default"): if isinstance(bucket, Bucket): bucket_name = bucket.name else: bucket_name = bucket api = self.baseUrl + "pools/default/buckets/%s/controller/doFlush" % (bucket_name) status, content, header = self._http_request(api, 'POST') if not status: raise BucketFlushFailed(self.ip, bucket_name) log.info("Flush for bucket '%s' was triggered" % bucket_name) return True def update_notifications(self, enable): api = self.baseUrl + 'settings/stats' params = urllib.parse.urlencode({'sendStats' : enable}) log.info('settings/stats params : {0}'.format(params)) status, content, 
header = self._http_request(api, 'POST', params) return status def get_notifications(self): api = self.baseUrl + 'settings/stats' status, content, header = self._http_request(api) json_parsed = json.loads(content) if status: return json_parsed["sendStats"] return None def get_num_rollback_stat(self, bucket): api = self.index_baseUrl + 'stats' status, content, header = self._http_request(api) json_parsed = json.loads(content) num_rollback = json_parsed["MAINT_STREAM:{}:num_rollbacks".format(bucket)] return num_rollback def get_num_rollback_to_zero_stat(self, bucket): api = self.index_baseUrl + 'stats' status, content, header = self._http_request(api) json_parsed = json.loads(content) num_rollback = json_parsed["MAINT_STREAM:{}:num_rollbacks_to_zero".format(bucket)] return num_rollback def get_logs(self, last_n=10, contains_text=None): api = self.baseUrl + 'logs' status, content, header = self._http_request(api) json_parsed = json.loads(content.decode("utf-8","ignore")) logs = json_parsed['list'] logs.reverse() result = [] for i in range(min(last_n, len(logs))): result.append(logs[i]) if contains_text is not None and contains_text in logs[i]["text"]: break return result def print_UI_logs(self, last_n=10, contains_text=None): logs = self.get_logs(last_n, contains_text) log.info("Latest logs from UI on {0}:".format(self.ip)) for lg in logs: log.error(lg) def get_ro_user(self): api = self.baseUrl + 'settings/readOnlyAdminName' status, content, header = self._http_request(api, 'GET', '') return content, status def delete_ro_user(self): api = self.baseUrl + 'settings/readOnlyUser' status, content, header = self._http_request(api, 'DELETE', '') return status def create_ro_user(self, username, password): api = self.baseUrl + 'settings/readOnlyUser' params = urllib.parse.urlencode({'username' : username, 'password' : password}) log.info('settings/readOnlyUser params : {0}'.format(params)) status, content, header = self._http_request(api, 'POST', params) return status # 
Change password for readonly user def changePass_ro_user(self, username, password): api = self.baseUrl + 'settings/readOnlyUser' params = urllib.parse.urlencode({'username' : username, 'password' : password}) log.info('settings/readOnlyUser params : {0}'.format(params)) status, content, header = self._http_request(api, 'PUT', params) return status '''Start Monitoring/Profiling Rest Calls''' def set_completed_requests_collection_duration(self, server, min_time): http = httplib2.Http(disable_ssl_certificate_validation=True) n1ql_port = CbServer.n1ql_port protocol = "http" if CbServer.use_https: n1ql_port = str(CbServer.ssl_port_map.get(str(n1ql_port), str(n1ql_port))) protocol = "https" api = "%s://%s:%s/" % (protocol,server.ip, n1ql_port) + "admin/settings" body = {"completed-threshold": min_time} headers = self._create_headers_with_auth('Administrator', 'password') response, content = http.request(api, "POST", headers=headers, body=json.dumps(body)) return response, content def set_completed_requests_max_entries(self, server, no_entries): http = httplib2.Http(disable_ssl_certificate_validation=True) n1ql_port = CbServer.n1ql_port protocol = "http" if CbServer.use_https: n1ql_port = str(CbServer.ssl_port_map.get(str(n1ql_port), str(n1ql_port))) protocol = "https" api = "%s://%s:%s/" % (protocol, server.ip, n1ql_port) + "admin/settings" body = {"completed-limit": no_entries} headers = self._create_headers_with_auth('Administrator', 'password') response, content = http.request(api, "POST", headers=headers, body=json.dumps(body)) return response, content def set_profiling(self, server, setting): http = httplib2.Http(disable_ssl_certificate_validation=True) n1ql_port = CbServer.n1ql_port protocol = "http" if CbServer.use_https: n1ql_port = str(CbServer.ssl_port_map.get(str(n1ql_port), str(n1ql_port))) protocol = "https" api = "%s://%s:%s/" % (protocol, server.ip, n1ql_port) + "admin/settings" body = {"profile": setting} headers = 
self._create_headers_with_auth('Administrator', 'password') response, content = http.request(api, "POST", headers=headers, body=json.dumps(body)) return response, content def set_query_servicers(self, server, setting, servicers="servicers"): http = httplib2.Http(disable_ssl_certificate_validation=True) n1ql_port = CbServer.n1ql_port protocol = "http" if CbServer.use_https: n1ql_port = str(CbServer.ssl_port_map.get(str(n1ql_port), str(n1ql_port))) protocol = "https" api = "%s://%s:%s/" % (protocol, server.ip, n1ql_port) + "admin/settings" body = {servicers: setting} headers = self._create_headers_with_auth('Administrator', 'password') response, content = http.request(api, "POST", headers=headers, body=json.dumps(body)) return response, content def set_profiling_controls(self, server, setting): http = httplib2.Http(disable_ssl_certificate_validation=True) n1ql_port = CbServer.n1ql_port protocol = "http" if CbServer.use_https: n1ql_port = str(CbServer.ssl_port_map.get(str(n1ql_port), str(n1ql_port))) protocol = "https" api = "%s://%s:%s/" % (protocol, server.ip, n1ql_port) + "admin/settings" body = {"controls": setting} headers = self._create_headers_with_auth('Administrator', 'password') response, content = http.request(api, "POST", headers=headers, body=json.dumps(body)) return response, content def get_query_admin_settings(self, server): http = httplib2.Http(disable_ssl_certificate_validation=True) n1ql_port = CbServer.n1ql_port protocol = "http" if CbServer.use_https: n1ql_port = str(CbServer.ssl_port_map.get(str(n1ql_port), str(n1ql_port))) protocol = "https" api = "%s://%s:%s/" % (protocol, server.ip, n1ql_port) + "admin/settings" headers = self._create_headers_with_auth('Administrator', 'password') response, content = http.request(api, "GET", headers=headers) result = json.loads(content) return result def get_query_vitals(self, server): http = httplib2.Http(disable_ssl_certificate_validation=True) n1ql_port = CbServer.n1ql_port protocol = "http" if 
CbServer.use_https: n1ql_port = str(CbServer.ssl_port_map.get(str(n1ql_port), str(n1ql_port))) protocol = "https" api = "%s://%s:%s/" % (protocol,server.ip, n1ql_port) + "admin/vitals" headers = self._create_headers_with_auth('Administrator', 'password') response, content = http.request(api, "GET", headers=headers) return response, content '''End Monitoring/Profiling Rest Calls''' def create_whitelist(self, server, whitelist): http = httplib2.Http(disable_ssl_certificate_validation=True) protocol = "http" if CbServer.use_https: protocol = "https" api = "%s://%s:%s/" % (protocol, server.ip, server.port) + "settings/querySettings/curlWhitelist" headers = self._create_headers_with_auth('Administrator', 'password') response, content = http.request(api, "POST", headers=headers, body=json.dumps(whitelist)) return response, content def query_tool(self, query, port=8093, timeout=1300, query_params={}, is_prepared=False, named_prepare=None, verbose = True, encoded_plan=None, servers=None): if timeout is None: timeout = 1300 protocol = "http" if CbServer.use_https: port = str(CbServer.ssl_port_map.get(str(port), str(port))) protocol = "https" key = 'prepared' if is_prepared else 'statement' headers = None prepared = json.dumps(query) if is_prepared: if named_prepare and encoded_plan: http = httplib2.Http(disable_ssl_certificate_validation=True) if len(servers)>1: url = "%s://%s:%s/query/service" % (protocol, servers[1].ip, port) else: url = "%s://%s:%s/query/service" % (protocol, self.ip, port) headers = self._create_headers_encoded_prepared() body = {'prepared': named_prepare, 'encoded_plan':encoded_plan} response, content = http.request(url, 'POST', headers=headers, body=json.dumps(body)) return eval(content) elif named_prepare and not encoded_plan: params = 'prepared=' + urllib.parse.quote(prepared, '~()') params = 'prepared="%s"'% named_prepare else: if isinstance(query, dict): prepared = json.dumps(query['name']) else: prepared = json.dumps(query) prepared = 
str(prepared) params = 'prepared=' + urllib.parse.quote(prepared, '~()') if 'creds' in query_params and query_params['creds']: headers = self._create_headers_with_auth(query_params['creds'][0]['user'], query_params['creds'][0]['pass']) api = "%s://%s:%s/query/service?%s" % (protocol, self.ip, port, params) log.info("%s"%api) else: params = {key : query} try: if 'creds' in query_params and query_params['creds']: headers = self._create_headers_with_auth(query_params['creds'][0]['user'], query_params['creds'][0]['pass']) del query_params['creds'] except Exception: traceback.print_exc() params.update(query_params) params = urllib.parse.urlencode(params) if verbose: log.info('query params : {0}'.format(params)) api = "%s://%s:%s/query?%s" % (protocol, self.ip, port, params) if 'query_context' in query_params and query_params['query_context']: log.info(f"Running Query with query_context: {query_params["query_context"]}") try: status, content, header = self._http_request(api, 'POST', timeout=timeout, headers=headers) except Exception as ex: print("\nException error: ", str(ex)) print("\napi: ", api) print("\nheaders: ", headers) try: return json.loads(content) except ValueError: return content def analytics_tool(self, query, port=8095, timeout=650, query_params={}, is_prepared=False, named_prepare=None, verbose = True, encoded_plan=None, servers=None): protocol = "http" if CbServer.use_https: port = str(CbServer.ssl_port_map.get(str(port), str(port))) protocol = "https" key = 'prepared' if is_prepared else 'statement' headers = None content="" prepared = json.dumps(query) if is_prepared: if named_prepare and encoded_plan: http = httplib2.Http(disable_ssl_certificate_validation=True) if len(servers)>1: url = "%s://%s:%s/query/service" % (protocol, servers[1].ip, port) else: url = "%s://%s:%s/query/service" % (protocol, self.ip, port) headers = {'Content-type': 'application/json'} body = {'prepared': named_prepare, 'encoded_plan':encoded_plan} response, content = 
http.request(url, 'POST', headers=headers, body=json.dumps(body)) return eval(content) elif named_prepare and not encoded_plan: params = 'prepared=' + urllib.parse.quote(prepared, '~()') params = 'prepared="%s"'% named_prepare else: prepared = json.dumps(query) prepared = str(prepared.encode('utf-8')) params = 'prepared=' + urllib.parse.quote(prepared, '~()') if 'creds' in query_params and query_params['creds']: headers = self._create_headers_with_auth(query_params['creds'][0]['user'], query_params['creds'][0]['pass']) api = "%s/analytics/service?%s" % (self.cbas_base_url, params) log.info("%s"%api) else: params = {key : query} if 'creds' in query_params and query_params['creds']: headers = self._create_headers_with_auth(query_params['creds'][0]['user'], query_params['creds'][0]['pass']) del query_params['creds'] params.update(query_params) params = urllib.parse.urlencode(params) if verbose: log.info('query params : {0}'.format(params)) api = "%s/analytics/service?%s" % (self.cbas_base_url, params) status, content, header = self._http_request(api, 'POST', timeout=timeout, headers=headers) try: return json.loads(content) except ValueError: return content def query_tool_stats(self, server): n1ql_port = CbServer.n1ql_port protocol = "http" if CbServer.use_https: n1ql_port = CbServer.ssl_n1ql_port protocol = "https" log.info('query n1ql stats') api = "%s://%s:%s/admin/stats" % (protocol, server.ip, str(n1ql_port)) status, content, header = self._http_request(api, 'GET') log.info(content) try: return json.loads(content) except ValueError: return content def index_tool_stats(self, show_index_stats=True): log.info('index n1ql stats') port = CbServer.port protocol = "http" if CbServer.use_https: port = CbServer.ssl_port protocol = "https" api = "%s://%s:%s/indexStatus" % (protocol, self.ip, port) params = "" status, content, header = self._http_request(api, 'GET', params) if show_index_stats: log.info(content) try: return json.loads(content) except ValueError: return 
content # return all rack/zone info def get_all_zones_info(self, timeout=120): zones = {} api = self.baseUrl + 'pools/default/serverGroups' status, content, header = self._http_request(api, timeout=timeout) if status: zones = json.loads(content) else: raise Exception("Failed to get all zones info.\n \ Zone only supports from couchbase server version 2.5 and up.") return zones # return group name and unique uuid def get_zone_names(self): zone_names = {} zone_info = self.get_all_zones_info() if zone_info and len(zone_info["groups"]) >= 1: for i in range(0, len(zone_info["groups"])): # pools/default/serverGroups/ = 27 chars zone_names[zone_info["groups"][i]["name"]] = zone_info["groups"][i]["uri"][28:] return zone_names def add_zone(self, zone_name): api = self.baseUrl + 'pools/default/serverGroups' request_name = "name={0}".format(zone_name) status, content, header = self._http_request(api, "POST", \ params=request_name) if status: log.info("zone {0} is added".format(zone_name)) return True else: raise Exception("Failed to add zone with name: %s " % zone_name) def delete_zone(self, zone_name): api = self.baseUrl + 'pools/default/serverGroups/' # check if zone exist found = False zones = self.get_zone_names() for zone in zones: if zone_name == zone: api += zones[zone_name] found = True break if not found: raise Exception("There is not zone with name: %s in cluster" % zone_name) status, content, header = self._http_request(api, "DELETE") if status: log.info("zone {0} is deleted".format(zone_name)) else: raise Exception("Failed to delete zone with name: %s " % zone_name) def rename_zone(self, old_name, new_name): api = self.baseUrl + 'pools/default/serverGroups/' # check if zone exist found = False zones = self.get_zone_names() for zone in zones: if old_name == zone: api += zones[old_name] request_name = "name={0}".format(new_name) found = True break if not found: raise Exception("There is not zone with name: %s in cluster" % old_name) status, content, header = 
self._http_request(api, "PUT", params=request_name) if status: log.info("zone {0} is renamed to {1}".format(old_name, new_name)) else: raise Exception("Failed to rename zone with name: %s " % old_name) # get all nodes info in one zone/rack/group def get_nodes_in_zone(self, zone_name): nodes = {} tmp = {} zone_info = self.get_all_zones_info() if zone_name != "": found = False if len(zone_info["groups"]) >= 1: for i in range(0, len(zone_info["groups"])): if zone_info["groups"][i]["name"] == zone_name: tmp = zone_info["groups"][i]["nodes"] if not tmp: log.info("zone {0} is existed but no node in it".format(zone_name)) # remove port for node in tmp: node["hostname"] = node["hostname"].split(":") node["hostname"] = node["hostname"][0] nodes[node["hostname"]] = node found = True break if not found: raise Exception("There is not zone with name: %s in cluster" % zone_name) return nodes def get_zone_and_nodes(self): """ only return zones with node in its """ zones = {} tmp = {} zone_info = self.get_all_zones_info() if len(zone_info["groups"]) >= 1: for i in range(0, len(zone_info["groups"])): tmp = zone_info["groups"][i]["nodes"] if not tmp: log.info("zone {0} is existed but no node in it".format(tmp)) # remove port else: nodes = [] for node in tmp: node["hostname"] = node["hostname"].split(":") node["hostname"] = node["hostname"][0] print(node["hostname"][0]) nodes.append(node["hostname"]) zones[zone_info["groups"][i]["name"]] = nodes return zones def get_zone_uri(self): zone_uri = {} zone_info = self.get_all_zones_info() if zone_info and len(zone_info["groups"]) >= 1: for i in range(0, len(zone_info["groups"])): zone_uri[zone_info["groups"][i]["name"]] = zone_info["groups"][i]["uri"] return zone_uri def shuffle_nodes_in_zones(self, moved_nodes, source_zone, target_zone): # moved_nodes should be a IP list like # ["192.168.171.144", "192.168.171.145"] request = "" for i in range(0, len(moved_nodes)): moved_nodes[i] = "ns_1@" + moved_nodes[i] all_zones = 
self.get_all_zones_info() api = self.baseUrl + all_zones["uri"][1:] moved_node_json = [] for i in range(0, len(all_zones["groups"])): for node in all_zones["groups"][i]["nodes"]: if all_zones["groups"][i]["name"] == source_zone: for n in moved_nodes: if n == node["otpNode"]: moved_node_json.append({"otpNode": node["otpNode"]}) zone_json = {} group_json = [] for i in range(0, len(all_zones["groups"])): node_j = [] zone_json["uri"] = all_zones["groups"][i]["uri"] zone_json["name"] = all_zones["groups"][i]["name"] zone_json["nodes"] = node_j if not all_zones["groups"][i]["nodes"]: if all_zones["groups"][i]["name"] == target_zone: for i in range(0, len(moved_node_json)): zone_json["nodes"].append(moved_node_json[i]) else: zone_json["nodes"] = [] else: for node in all_zones["groups"][i]["nodes"]: if all_zones["groups"][i]["name"] == source_zone and \ node["otpNode"] in moved_nodes: pass else: node_j.append({"otpNode": node["otpNode"]}) if all_zones["groups"][i]["name"] == target_zone: for k in range(0, len(moved_node_json)): node_j.append(moved_node_json[k]) zone_json["nodes"] = node_j group_json.append({"name": zone_json["name"], "uri": zone_json["uri"], "nodes": zone_json["nodes"]}) request = '{{"groups": {0} }}'.format(json.dumps(group_json)) status, content, header = self._http_request(api, "PUT", params=request) # sample request format # request = ' {"groups":[{"uri":"/pools/default/serverGroups/0","nodes": [] },\ # {"uri":"/pools/default/serverGroups/c8275b7a88e6745c02815dde4a505e70","nodes": [] },\ # {"uri":"/pools/default/serverGroups/1acd9810a027068bd14a1ddd43db414f","nodes": \ # [{"otpNode":"ns_1@192.168.171.144"},{"otpNode":"ns_1@192.168.171.145"}]} ]} ' return status def is_zone_exist(self, zone_name): found = False zones = self.get_zone_names() if zones: for zone in zones: if zone_name == zone: found = True return True break if not found: log.error("There is not zone with name: {0} in cluster.".format(zone_name)) return False def get_items_info(self, keys, 
bucket='default'):
        # Fetch per-document info for each key via the bucket docs endpoint.
        # Returns {key: parsed_json} for keys whose request succeeded.
        items_info = {}
        for key in keys:
            api = '{0}{1}{2}/docs/{3}'.format(self.baseUrl, 'pools/default/buckets/', bucket, key)
            status, content, header = self._http_request(api)
            if status:
                items_info[key] = json.loads(content)
        return items_info

    def start_cluster_logs_collection(self, nodes="*", upload=False, \
                                      uploadHost=None, customer="", ticket=""):
        # Kick off cbcollect on the given nodes; optionally upload the result.
        if not upload:
            params = urllib.parse.urlencode({"nodes":nodes})
        else:
            params = urllib.parse.urlencode({"nodes":nodes, "uploadHost":uploadHost, \
                                             "customer":customer, "ticket":ticket})
        api = self.baseUrl + "controller/startLogsCollection"
        status, content, header = self._http_request(api, "POST", params)
        return status, content

    def get_cluster_logs_collection_info(self):
        # Scan the cluster task list for the clusterLogsCollection task.
        # Returns the task dict, or None when absent / request failed.
        api = self.baseUrl + "pools/default/tasks/"
        status, content, header = self._http_request(api, "GET")
        if status:
            tmp = json.loads(content)
            for k in tmp:
                if k["type"] == "clusterLogsCollection":
                    content = k
                    return content
        return None

    """ result["progress"]: progress logs collected at cluster level
        result["status]: status logs collected at cluster level
        result["perNode"]: all information logs collected at each node """
    def get_cluster_logs_collection_status(self):
        # Convenience accessor over get_cluster_logs_collection_info().
        result = self.get_cluster_logs_collection_info()
        if result:
            return result["progress"], result["status"], result["perNode"]
        return None, None, None

    def cancel_cluster_logs_collection(self):
        # Abort a running cbcollect task.
        api = self.baseUrl + "controller/cancelLogsCollection"
        status, content, header = self._http_request(api, "POST")
        return status, content

    def set_log_redaction_level(self, redaction_level="none"):
        # Set the log redaction level and confirm the server echoed it back.
        api = self.baseUrl + "settings/logRedaction"
        params = urllib.parse.urlencode({"logRedactionLevel":redaction_level})
        status, content, header = self._http_request(api, "POST", params)
        if status:
            result = json.loads(content)
            if result["logRedactionLevel"] == redaction_level:
                return True
            else:
                return False
        return False

    def get_bucket_CCCP(self, bucket):
        # Fetch the CCCP (cluster map) config for a bucket; accepts either a
        # bucket name or a Bucket object.
        log.info("Getting CCCP config ")
        api = '%spools/default/b/%s' % (self.baseUrl, bucket)
        if isinstance(bucket, Bucket):
            api = '%spools/default/b/%s' % (self.baseUrl, bucket.name)
        status, content, header = self._http_request(api)
        if status:
            return json.loads(content)
        return None

    def get_recovery_task(self):
        # Return the first ns_server task of type "recovery", else None.
        content = self.ns_server_tasks()
        for item in content:
            if item["type"] == "recovery":
                return item
        return None

    def get_recovery_progress(self, recoveryStatusURI):
        # Poll the recovery status URI returned inside a recovery task.
        api = '%s%s' % (self.baseUrl, recoveryStatusURI)
        status, content, header = self._http_request(api)
        if status:
            return json.loads(content)
        return None

    def get_warming_up_tasks(self):
        # Return every ns_server task currently in the "warming_up" state.
        tasks = self.ns_server_tasks()
        tasks_warmup = []
        for task in tasks:
            if task["type"] == "warming_up":
                tasks_warmup.append(task)
        return tasks_warmup

    def compact_bucket(self, bucket="default"):
        # Trigger bucket compaction; raises BucketCompactionException on failure.
        api = self.baseUrl + 'pools/default/buckets/{0}/controller/compactBucket'.format(bucket)
        status, content, header = self._http_request(api, 'POST')
        if status:
            log.info('bucket compaction successful')
        else:
            raise BucketCompactionException(bucket)
        return True

    def cancel_bucket_compaction(self, bucket="default"):
        # Cancel a running bucket compaction; accepts a name or Bucket object.
        api = self.baseUrl + 'pools/default/buckets/{0}/controller/cancelBucketCompaction'.format(bucket)
        if isinstance(bucket, Bucket):
            api = self.baseUrl + 'pools/default/buckets/{0}/controller/cancelBucketCompaction'.format(bucket.name)
        status, content, header = self._http_request(api, 'POST')
        log.info("Status is {0}".format(status))
        if status:
            log.info('Cancel bucket compaction successful')
        else:
            raise BucketCompactionException(bucket)
        return True

    def set_bucket_compressionMode(self, bucket="default", mode="passive"):
        # Update the bucket's compressionMode (off/passive/active).
        api = self.baseUrl + "pools/default/buckets/" + bucket
        body = {'compressionMode': mode}
        params = urllib.parse.urlencode(body)
        headers = self._create_headers()
        status, content, header = self._http_request(api, 'POST', params=params, headers=headers)
        log.info("{0} with params: {1}".format(api, params))
        if not status:
            raise Exception("Unable to set compressionMode {0} for bucket {1}".format(mode, bucket))

    # NOTE(review): the four LDAP definitions below (clearLDAPSettings and
    # ldapUserRestOperation) are each defined TWICE in this class; Python
    # keeps only the later definitions, so the two immediately below are
    # dead code shadowed by the second pair further down.
    '''LDAP Rest API '''
    ''' clearLDAPSettings - Function to clear LDAP settings
        Parameter - None
        Returns - status of LDAPAuth clear command '''
    def clearLDAPSettings(self):
        api = self.baseUrl + 'settings/saslauthdAuth'
        params = urllib.parse.urlencode({'enabled':'false'})
        status, content, header = self._http_request(api, 'POST', params)
        return status, content, header

    ''' ldapUserRestOperation - Execute LDAP REST API
        Input Parameter -
            authOperation - this is for auth need to be enabled or disabled - True or 0
            currAdmmins - a list of username to add to full admin matching with ldap
            currROAdmins - a list of username to add to RO Admin
        Returns - status, content and header for the command executed '''
    def ldapUserRestOperation(self, authOperation, adminUser='', ROadminUser=''):
        authOperation = authOperation  # no-op self-assignment kept from original
        currAdmins = ''
        currROAdmins = ''
        if (adminUser != ''):
            for user in adminUser:
                currAdmins = user[0] + "\n\r" + currAdmins
        if (ROadminUser != ''):
            for user in ROadminUser:
                currROAdmins = user[0] + "\n\r" + currROAdmins
        content = self.executeLDAPCommand(authOperation, currAdmins, currROAdmins)

    '''LDAP Rest API '''
    ''' clearLDAPSettings - Function to clear LDAP settings
        Parameter - None
        Returns - status of LDAPAuth clear command '''
    def clearLDAPSettings (self):
        api = self.baseUrl + 'settings/saslauthdAuth'
        params = urllib.parse.urlencode({'enabled':'false'})
        status, content, header = self._http_request(api, 'POST', params)
        return status, content, header

    ''' ldapUserRestOperation - Execute LDAP REST API
        Input Parameter -
            authOperation - this is for auth need to be enabled or disabled - True or 0
            currAdmmins - a list of username to add to full admin matching with ldap
            currROAdmins - a list of username to add to RO Admin
        Returns - status, content and header for the command executed '''
    def ldapUserRestOperation(self, authOperation, adminUser='', ROadminUser='', exclude=None):
        # This (later) definition shadows the earlier one of the same name.
        if (authOperation):
            authOperation = 'true'
        else:
            authOperation = 'false'
        currAdmins = ''
        currROAdmins = ''
        # Build "\n\r"-separated username lists from (name, ...) tuples.
        if (adminUser != ''):
            for user in adminUser:
                currAdmins = user[0] + "\n\r" + currAdmins
        if (ROadminUser != ''):
            for user in ROadminUser:
                currROAdmins = user[0] + "\n\r" + currROAdmins
        content = self.executeLDAPCommand(authOperation, currAdmins, currROAdmins, exclude)

    ''' executeLDAPCommand - Execute LDAP REST API
        Input Parameter -
            authOperation - this is for auth need to be enabled or disabled - True or 0
            currAdmmins - a list of username to add to full admin matching with ldap
            currROAdmins - a list of username to add to RO Admin
        Returns - status, content and header for the command executed '''
    def executeLDAPCommand(self, authOperation, currAdmins, currROAdmins, exclude=None):
        api = self.baseUrl + "settings/saslauthdAuth"
        if (exclude is None):
            log.info ("into exclude is None")
            params = urllib.parse.urlencode({
                'enabled': authOperation,
                'admins': '{0}'.format(currAdmins),
                'roAdmins': '{0}'.format(currROAdmins),
            })
        else:
            log.info ("Into exclude for value of fullAdmin {0}".format(exclude))
            if (exclude == 'fullAdmin'):
                # Exclude full admins: only send the RO admin list.
                params = urllib.parse.urlencode({
                    'enabled': authOperation,
                    'roAdmins': '{0}'.format(currROAdmins),
                })
            else:
                log.info ("Into exclude for value of fullAdmin {0}".format(exclude))
                # Exclude RO admins: only send the full admin list.
                params = urllib.parse.urlencode({
                    'enabled': authOperation,
                    'admins': '{0}'.format(currAdmins),
                })
        status, content, header = self._http_request(api, 'POST', params)
        return content

    ''' validateLogin - Validate if user can login using a REST API
        Input Parameter - user and password to check for login.
Also take a boolean to decide if the status should be 200 or 400 and
        everything else should be false
        Returns - True of false based if user should login or login fail '''
    def validateLogin(self, user, password, login, getContent=False):
        # POST to /uilogin directly (bypasses _http_request) and compare the
        # HTTP status against the expected outcome in ``login``.
        api = self.baseUrl + "uilogin"
        header = {'Content-type': 'application/x-www-form-urlencoded'}
        params = urllib.parse.urlencode({'user':'{0}'.format(user),
                                         'password':'{0}'.format(password)})
        log.info ("value of param is {0}".format(params))
        http = httplib2.Http()
        status, content = http.request(api, 'POST', headers=header, body=params)
        log.info ("Status of login command - {0}".format(status))
        if (getContent):
            return status, content
        # Expected-success must yield 200; expected-failure must yield 400.
        if ((status['status'] == "200" and login == True) or (status ['status'] == "400" and login == False)):
            return True
        else:
            return False

    ''' ldapRestOperationGet - Get setting of LDAPAuth - Settings
        Returns - list of Admins, ROAdmins and is LDAPAuth enabled or not '''
    def ldapRestOperationGetResponse(self):
        log.info ("GET command for LDAP Auth")
        api = self.baseUrl + "settings/saslauthdAuth"
        status, content, header = self._http_request(api, 'GET')
        return json.loads(content)

    ''' executeValidateCredentials - API to check credentials of users
        Input - user and password that needs validation
        Returns -
            [role]:<currentrole>
            [source]:<saslauthd,builtin> '''
    def executeValidateCredentials(self, user, password):
        api = self.baseUrl + "validateCredentials"
        params = urllib.parse.urlencode({
            'user':'{0}'.format(user),
            'password':'{0}'.format(password)
        })
        status, content, header = self._http_request(api, 'POST', params)
        log.info ("Status of executeValidateCredentials command - {0}".format(status))
        return status, json.loads(content)

    '''MadHatter LDAP Group Support'''
    ''' Assign group roles '''
    def add_group_role(self,group_name,description,roles,ldap_group_ref=None):
        # PUT a new/updated RBAC group; ldap_group_ref links it to an LDAP group.
        api = self.baseUrl + "/settings/rbac/groups/" + group_name
        if ldap_group_ref is not None:
            params = urllib.parse.urlencode({
                'description':'{0}'.format(description),
                'roles':'{0}'.format(roles),
                'ldap_group_ref':'{0}'.format(ldap_group_ref)
            })
        else:
            params = urllib.parse.urlencode({
                'description':'{0}'.format(description),
                'roles':'{0}'.format(roles)
            })
        status, content, header = self._http_request(api, 'PUT', params)
        log.info ("Status of Adding role to group command is {0}".format(status))
        return status, json.loads(content)

    def delete_group(self,group_name):
        # Remove an RBAC group.
        api = self.baseUrl + "/settings/rbac/groups/" + group_name
        status, content, header = self._http_request(api, 'DELETE')
        log.info ("Status of Delete role from CB is {0}".format(status))
        return status, json.loads(content)

    def get_group_list(self):
        # List all RBAC groups.
        api = self.baseUrl + "/settings/rbac/groups/"
        status, content, header = self._http_request(api, 'GET')
        return status, json.loads(content)

    def get_group_details(self, group_name):
        # Fetch a single RBAC group's definition.
        api = self.baseUrl + "/settings/rbac/groups/" + group_name
        status, content, header = self._http_request(api, 'GET')
        return status, json.loads(content)

    def add_user_group(self,group_name,user_name):
        # Assign a local user to a group (replaces the user's groups field).
        api = self.baseUrl + "/settings/rbac/users/local/" + user_name
        params = urllib.parse.urlencode({
            'groups':'{0}'.format(group_name)
        })
        status, content, header = self._http_request(api, 'PUT', params)
        log.info ("Status of Adding role to group command is {0}".format(status))
        return status, json.loads(content)

    def get_user_group(self,user_name):
        # Fetch a local user's definition (including group membership).
        api = self.baseUrl + "/settings/rbac/users/local/" + user_name
        status, content, header = self._http_request(api, 'GET')
        log.info ("Status of Adding role to group command is {0}".format(status))
        return status, json.loads(content)

    def grp_invalidate_cache(self):
        # Flush the server-side LDAP cache (trailing-slash variant).
        api = self.baseUrl + "/settings/invalidateLDAPCache/"
        status, content, header = self._http_request(api, 'POST')
        log.info("Status of Adding role to group command is {0}".format(status))
        return status, json.loads(content)

    def invalidate_ldap_cache(self):
        # Flush the server-side LDAP cache.
        api = self.baseUrl + '/settings/invalidateLDAPCache'
        status, content, header = self._http_request(api, 'POST')
        log.info("Status of Invalidate LDAP Cached is {0}".format(status))
        return status, json.loads(content)

    def ldap_validate_conn(self):
        # Validate LDAP connectivity settings.
        api = self.baseUrl + "/settings/ldap/validate/connectivity"
        status, content, header = self._http_request(api, 'POST')
        log.info("Status of Adding role to group command is {0}".format(status))
        return status, json.loads(content)

    def ldap_validate_authen(self, user_name, password='password'):
        # Validate LDAP authentication for one user.
        api = self.baseUrl + "/settings/ldap/validate/authentication"
        params = urllib.parse.urlencode({
            'auth_user': '{0}'.format(user_name),
            'auth_pass': '{0}'.format(password)
        })
        status, content, header = self._http_request(api, 'POST', params)
        log.info("Status of Adding role to group command is {0}".format(status))
        return status, json.loads(content)

    def ldap_validate_grp_query(self, user):
        # Validate the LDAP groups query for one user.
        api = self.baseUrl + "/settings/ldap/validate/groups_query"
        params = urllib.parse.urlencode({
            'groups_query_user':'{0}'.format(user)
        })
        status, content, header = self._http_request(api, 'POST',params)
        log.info ("Status of Adding role to group command is {0}".format(status))
        return status, json.loads(content)

    def setup_ldap(self, data, extraparam):
        # Configure LDAP: ``data`` is urlencoded, ``extraparam`` is appended
        # verbatim (caller is responsible for encoding it).
        api = self.baseUrl + '/settings/ldap/'
        params = urllib.parse.urlencode(data)
        params = params + "&" + extraparam
        status, content, header = self._http_request(api, 'POST',params)
        log.info ("Status of Setting up LDAP command is {0}".format(status))
        return status, json.loads(content)

    ''' Audit Commands '''
    ''' getAuditSettings - API returns audit settings for Audit
        Input - None
        Returns -
            [archive_path]:<path for archieve>
            [auditd_enabled]:<enabled disabled status for auditd>
            [log_path]:<path for logs>
            [rotate_interval]:<log rotate interval> '''
    def getAuditSettings(self):
        api = self.baseUrl + "settings/audit"
        status, content, header = self._http_request(api, 'GET')
        return json.loads(content)

    ''' getAuditSettings - API returns audit settings for Audit
        Input -
            [archive_path]:<path for archieve>
[auditd_enabled]:<enabled disabled status for auditd>
            [rotate_interval]:<log rotate interval in seconds> '''
    def setAuditSettings(self, enabled='true', rotateInterval=86400,
                         logPath='/opt/couchbase/var/lib/couchbase/logs',
                         services_to_disable=None):
        # Configure auditd. NOTE(review): on success this returns just
        # ``status`` but on failure a (status, parsed_content) tuple — an
        # inconsistent return shape kept from the original.
        api = self.baseUrl + "settings/audit"
        params = {'rotateInterval':'{0}'.format(rotateInterval),
                  'auditdEnabled':'{0}'.format(enabled),
                  'logPath':'{0}'.format(logPath)}
        if services_to_disable:
            params['disabled'] = ",".join(services_to_disable)
        params = urllib.parse.urlencode(params)
        status, content, header = self._http_request(api, 'POST', params)
        log.info ("Value os status is {0}".format(status))
        log.info ("Value of content is {0}".format(content))
        if status:
            return status
        else:
            return status, json.loads(content)

    def get_audit_descriptors(self):
        # List all audit event descriptors; None when the request fails.
        api = self.baseUrl + "/settings/audit/descriptors"
        status, content, header = self._http_request(api, 'GET',
                                                     headers=self._create_capi_headers())
        return json.loads(content) if status else None

    def _set_secrets_password(self, new_password):
        # Change the master (secrets) password. NOTE(review): in Python 3
        # ``'{0}'.format(bytes)`` yields "b'...'", so the encode() here looks
        # suspect — confirm against callers before relying on it.
        api = self.baseUrl + "/node/controller/changeMasterPassword"
        params = urllib.parse.urlencode({
            'newPassword': '{0}'.format(new_password.encode('utf-8').strip())
        })
        log.info("Params getting set is ---- {0}".format(params))
        # Undo percent-encoding of '$' and '=' expected literally by the API.
        params = params.replace('%24', '$')
        params = params.replace('%3D', '=')
        log.info("Params getting set is ---- {0}".format(params))
        status, content, header = self._http_request(api, 'POST', params)
        log.info("Status of set password command - {0}".format(status))
        log.info("Content of the response is {0}".format(content))
        log.info ("Header of the response is {0}".format(header))
        return status

    def set_downgrade_storage_mode_with_rest(self, downgrade=True,
                                             username="Administrator",
                                             password="password"):
        # Toggle the GSI storage-mode downgrade flag on the index service.
        authorization = self.get_authorization(username, password)
        if downgrade:
            api = self.index_baseUrl + 'settings/storageMode?downgrade=true'
        else:
            api = self.index_baseUrl + 'settings/storageMode?downgrade=false'
        headers = {'Content-type': 'application/json',
                   'Authorization': 'Basic %s' % authorization}
        status, content, header = self._http_request(api, 'POST', headers=headers)
        if not status:
            raise Exception(content)
        return json.loads(content)

    def create_index_with_rest(self, create_info, username="Administrator", password="password"):
        # Create a GSI index via the internal endpoint. ``create_info`` is a
        # Python dict repr that is massaged into valid JSON first.
        log.info("CREATE INDEX USING REST WITH PARAMETERS: " + str(create_info))
        authorization = self.get_authorization(username, password)
        api = self.index_baseUrl + 'internal/indexes?create=true'
        headers = {'Content-type': 'application/json',
                   'Authorization': 'Basic %s' % authorization}
        params = json.loads("{0}".format(create_info).replace('\'', '"').replace('True', 'true').replace('False', 'false'))
        status, content, header = self._http_request(api, 'POST', headers=headers,
                                                     params=json.dumps(params).encode("ascii", "ignore"))
        if not status:
            raise Exception(content)
        return json.loads(content)

    def build_index_with_rest(self, id, username="Administrator", password="password"):
        # Build a deferred GSI index by id. NOTE(review): the username/password
        # parameters are ignored — self.username/self.password are used instead.
        credentials = '{}:{}'.format(self.username, self.password)
        authorization = base64.encodebytes(credentials.encode('utf-8'))
        authorization = authorization.decode('utf-8').rstrip('\n')
        api = self.index_baseUrl + 'internal/indexes?build=true'
        build_info = {'ids': [id]}
        headers = {'Content-type': 'application/json',
                   'Authorization': 'Basic %s' % authorization}
        status, content, header = self._http_request(api, 'PUT', headers=headers,
                                                     params=json.dumps(build_info))
        if not status:
            raise Exception(content)
        return json.loads(content)

    def drop_index_with_rest(self, id, username="Administrator", password="password"):
        # Drop a GSI index by id; no return value on success.
        authorization = self.get_authorization(username, password)
        url = 'internal/index/{0}'.format(id)
        api = self.index_baseUrl + url
        headers = {'Content-type': 'application/json',
                   'Authorization': 'Basic %s' % authorization}
        status, content, header = self._http_request(api, 'DELETE', headers=headers)
        if not status:
            raise Exception(content)

    def get_all_indexes_with_rest(self,
username="Administrator", password="password"): credentials = '{}:{}'.format(self.username, self.password) authorization = base64.encodebytes(credentials.encode('utf-8')) authorization = authorization.decode('utf-8').rstrip('\n') url = 'internal/indexes' api = self.index_baseUrl + url headers = {'Content-type': 'application/json','Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) return json.loads(content) def lookup_gsi_index_with_rest(self, id, body, username="Administrator", password="password"): authorization = self.get_authorization(username, password) url = 'internal/index/{0}?lookup=true'.format(id) api = self.index_baseUrl + url headers = {'Content-type': 'application/json','Authorization': 'Basic %s' % authorization} params = json.loads("{0}".format(body).replace('\'', '"').replace('True', 'true').replace('False', 'false')) status, content, header = self._http_request(api, 'GET', headers=headers, params=json.dumps(params).encode("ascii", "ignore")) if not status: raise Exception(content) return json.loads(content) def full_table_scan_gsi_index_with_rest(self, id, body, username="Administrator", password="password"): if "limit" not in list(body.keys()): body["limit"] = 900000 authorization = self.get_authorization(username, password) url = 'internal/index/{0}?scanall=true'.format(id) api = self.index_baseUrl + url headers = {'Content-type': 'application/json','Authorization': 'Basic %s' % authorization} params = json.loads("{0}".format(body).replace('\'', '"').replace('True', 'true').replace('False', 'false')) status, content, header = self._http_request( api, 'GET', headers=headers, params=json.dumps(params).encode("ascii", "ignore")) if not status: raise Exception(content) # Following line is added since the content uses chunked encoding chunkless_content = content.decode().replace("][", ", \n") return json.loads(chunkless_content) def 
range_scan_gsi_index_with_rest(self, id, body, username="Administrator", password="password"): if "limit" not in list(body.keys()): body["limit"] = 300000 authorization = self.get_authorization(username, password) url = 'internal/index/{0}?range=true'.format(id) api = self.index_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} params = json.loads("{0}".format(body).replace( '\'', '"').replace('True', 'true').replace('False', 'false')) status, content, header = self._http_request( api, 'GET', headers=headers, params=json.dumps(params).encode("ascii", "ignore")) if not status: raise Exception(content) #Below line is there because of MB-20758 content = content.split(b'[]')[0].decode() # Following line is added since the content uses chunked encoding chunkless_content = content.decode().replace("][", ", \n") return json.loads(chunkless_content) def multiscan_for_gsi_index_with_rest(self, id, body, username="Administrator", password="password"): authorization = self.get_authorization(username, password) url = 'internal/index/{0}?multiscan=true'.format(id) api = self.index_baseUrl + url headers = {'Accept': 'application/json','Authorization': 'Basic %s' % authorization} params = json.loads("{0}".format(body).replace('\'', '"').replace( 'True', 'true').replace('False', 'false').replace( "~[]{}UnboundedtruenilNA~", "~[]{}UnboundedTruenilNA~")) params = json.dumps(params).encode("ascii", "ignore").decode().replace("\\\\", "\\") log.info(json.dumps(params).encode("ascii", "ignore")) status, content, header = self._http_request(api, 'GET', headers=headers, params=params) if not status: raise Exception(content) #Below line is there because of MB-20758 content = content.split(b'[]')[0].decode() # Following line is added since the content uses chunked encoding chunkless_content = content.replace("][", ", \n") if chunkless_content: return json.loads(chunkless_content) else: return content def 
multiscan_count_for_gsi_index_with_rest(self, id, body, username="Administrator", password="password"): authorization = self.get_authorization(username, password) url = 'internal/index/{0}?multiscancount=true'.format(id) api = self.index_baseUrl + url headers = {'Accept': 'application/json','Authorization': 'Basic %s' % authorization} count_cmd_body = body.replace('\'', '"').replace('True', 'true').replace('False', 'false') count_cmd_body = count_cmd_body.replace("~[]{}UnboundedtruenilNA~", "~[]{}UnboundedTruenilNA~") params = json.loads(count_cmd_body) params = json.dumps(params).encode("ascii", "ignore").decode().replace("\\\\", "\\") log.info(json.dumps(params).encode("ascii", "ignore")) status, content, header = self._http_request(api, 'GET', headers=headers, params=params) if not status: raise Exception(content) #Below line is there because of MB-20758 content = content.split(b'[]')[0].decode() # Following line is added since the content uses chunked encoding chunkless_content = content.replace("][", ", \n") if chunkless_content: return json.loads(chunkless_content) else: return content 'Get list of all roles that exist in the system' def retrive_all_user_role(self): url = "/settings/rbac/roles" api = self.baseUrl + url status, content, header = self._http_request(api, 'GET') if not status: raise Exception(content) return json.loads(content) 'Get list of current users and rols assigned to them' def retrieve_user_roles(self): url = "/settings/rbac/users" api = self.baseUrl + url status, content, header = self._http_request(api, 'GET') if not status: raise Exception(content) return json.loads(content) ''' Add/Update user role assignment user_id=userid of the user to act on payload=name=<nameofuser>&roles=admin,cluster_admin''' def set_user_roles(self, user_id, payload): url = "settings/rbac/users/" + user_id api = self.baseUrl + url status, content, header = self._http_request(api, 'PUT', payload) if not status: raise Exception(content) return 
json.loads(content) ''' Delete user from couchbase role assignment user_id=userid of user to act on''' def delete_user_roles(self, user_id): url = "settings/rbac/users/local/" + user_id api = self.baseUrl + url status, content, header = self._http_request(api, 'DELETE') if not status: raise Exception(content) return json.loads(content) ''' Returns base64 string of username:password ''' def get_authorization(self, username, password): credentials = '{}:{}'.format(username, password) authorization = base64.encodebytes(credentials.encode('utf-8')) return authorization.decode('utf-8').rstrip('\n') ''' Return list of permission with True/False if user has permission or not user_id = userid for checking permission password = password for userid permission_set=cluster.bucket[default].stats!read,cluster.bucket[default]!write ''' def check_user_permission(self, user_id, password, permission_set): url = "pools/default/checkPermissions/" api = self.baseUrl + url authorization = self.get_authorization(user_id, password) header = {'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'Basic %s' % authorization, 'Accept': '*/*'} status, content, header = self._http_request(api, 'POST', params=permission_set, headers=header) if not status: raise Exception(content) return json.loads(content) ''' Add/Update user role assignment user_id=userid of the user to act on payload=name=<nameofuser>&roles=admin,cluster_admin&password=<password> if roles=<empty> user will be created with no roles''' def add_set_builtin_user(self, user_id, payload): url = "settings/rbac/users/local/" + user_id api = self.baseUrl + url status, content, header = self._http_request(api, 'PUT', payload) if not status: raise Exception(content) return json.loads(content) ''' Add External User ''' def add_external_user(self,user_id,payload): url = "settings/rbac/users/external/" + user_id api = self.baseUrl + url status, content, header = self._http_request(api, 'PUT', payload) if not status: raise 
Exception(content) return json.loads(content) ''' Delete External User ''' def delete_external_user(self,user_id): url = "settings/rbac/users/external/" + user_id api = self.baseUrl + url status, content, header = self._http_request(api, 'DELETE') if not status: raise Exception(content) return json.loads(content) ''' Delete built-in user ''' def delete_builtin_user(self, user_id): url = "settings/rbac/users/local/" + user_id api = self.baseUrl + url status, content, header = self._http_request(api, 'DELETE') if not status: raise Exception(content) return json.loads(content) ''' Add/Update user role assignment user_id=userid of the user to act on password=<new password>''' def change_password_builtin_user(self, user_id, password): url = "controller/changePassword/" + user_id api = self.baseUrl + url status, content, header = self._http_request(api, 'POST', password) if not status: raise Exception(content) return json.loads(content) # Applicable to eventing service ''' Eventing lifecycle operation ''' def lifecycle_operation(self, name, operation,body=None): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name +"/"+ operation api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} if body != None: status, content, header = self._http_request(api, 'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) else: status, content, header = self._http_request(api, 'POST', headers=headers) if not status: raise Exception(content) return content ''' Save the Function so that it is visible in UI ''' def save_function(self, name, body): authorization = self.get_authorization(self.username, self.password) url = "_p/event/saveAppTempStore/?name=" + name api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic 
%s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) if not status: raise Exception(content) return content ''' Deploy the Function ''' def deploy_function(self, name, body): authorization = self.get_authorization(self.username, self.password) url = "_p/event/setApplication/?name=" + name api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) if not status: raise Exception(content) return content ''' GET all the Functions ''' def get_all_functions(self): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) return content ''' Undeploy the Function ''' def set_settings_for_function(self, name, body): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name +"/settings" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) if not status: raise Exception(content) return content ''' deploy the Function ''' def deploy_function_by_name(self, name): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = 
self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name + "/settings" body = {"deployment_status": True, "processing_status": True} api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) if not status: raise Exception(content) return content ''' pause the Function ''' def pause_function_by_name(self, name): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name + "/settings" body = {"deployment_status": True, "processing_status": False} api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) if not status: raise Exception(content) return content ''' undeploy the Function ''' def undeploy_function(self, name): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name +"/settings" body= {"deployment_status": False, "processing_status": False} api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) if not status: raise Exception(content) return content ''' Delete all the functions ''' def delete_all_function(self): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) 
url = "api/v1/functions" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'DELETE', headers=headers) if not status: raise Exception(content) return content ''' Delete single function ''' def delete_single_function(self, name): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'DELETE', headers=headers) if not status: raise Exception(content) return content ''' Delete the Function from UI ''' def delete_function_from_temp_store(self, name): authorization = self.get_authorization(self.username, self.password) url = "_p/event/deleteAppTempStore/?name=" + name api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'DELETE', headers=headers) if not status: raise Exception(content) return content ''' Delete the Function ''' def delete_function(self, name): authorization = self.get_authorization(self.username, self.password) url = "_p/event/deleteApplication/?name=" + name api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'DELETE', headers=headers) if not status: raise Exception(content) return content ''' Export the Function ''' def export_function(self, name): export_map = {} if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/export/" + name api = self.eventing_baseUrl + 
url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) if status: json_parsed = json.loads(content) for key in list(json_parsed[0].keys()): # returns an array tokens = key.split(":") val = json_parsed[0][key] if len(tokens) == 1: field = tokens[0] export_map[field] = val return export_map ''' Import the Function ''' def import_function(self, body): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/import" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=body) if not status: raise Exception(content) return content ''' Ensure that the eventing node is out of bootstrap node ''' def get_deployed_eventing_apps(self): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "getDeployedApps" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) return json.loads(content) ''' Ensure that the eventing node is out of bootstrap node ''' def get_running_eventing_apps(self): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "getRunningApps" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', 
headers=headers) if not status: raise Exception(content) return json.loads(content) ''' composite status of a handler ''' def get_composite_eventing_status(self): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/status" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) return json.loads(content) ''' Get Eventing processing stats ''' def get_event_processing_stats(self, name, eventing_map=None): if eventing_map is None: eventing_map = {} if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "getEventProcessingStats?name=" + name api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if status: json_parsed = json.loads(content) for key in list(json_parsed.keys()): tokens = key.split(":") val = json_parsed[key] if len(tokens) == 1: field = tokens[0] eventing_map[field] = val return eventing_map ''' Get Aggregate Eventing processing stats ''' def get_aggregate_event_processing_stats(self, name, eventing_map=None): if eventing_map is None: eventing_map = {} if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "getAggEventProcessingStats?name=" + name api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if status: json_parsed = 
json.loads(content) for key in list(json_parsed.keys()): tokens = key.split(":") val = json_parsed[key] if len(tokens) == 1: field = tokens[0] eventing_map[field] = val return eventing_map ''' Get Eventing execution stats ''' def get_event_execution_stats(self, name, eventing_map=None): if eventing_map is None: eventing_map = {} if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "getExecutionStats?name=" + name api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if status: json_parsed = json.loads(content) for key in list(json_parsed.keys()): tokens = key.split(":") val = json_parsed[key] if len(tokens) == 1: field = tokens[0] eventing_map[field] = val return eventing_map ''' Get Eventing failure stats ''' def get_event_failure_stats(self, name, eventing_map=None): if eventing_map is None: eventing_map = {} if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "getFailureStats?name=" + name api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if status: json_parsed = json.loads(content) for key in list(json_parsed.keys()): tokens = key.split(":") val = json_parsed[key] if len(tokens) == 1: field = tokens[0] eventing_map[field] = val return eventing_map ''' Get all eventing stats ''' def get_all_eventing_stats(self, seqs_processed=False, eventing_map=None): if eventing_map is None: eventing_map = {} if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = 
self.get_authorization(self.username, self.password) if seqs_processed: url = "api/v1/stats?type=full" else: url = "api/v1/stats" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) return json.loads(content) ''' Cleanup eventing ''' def cleanup_eventing(self): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "cleanupEventing" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) return content ''' enable debugger ''' def enable_eventing_debugger(self): authorization = self.get_authorization(self.username, self.password) url = "_p/event/api/v1/config" api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} body="{\"enable_debugger\": true}" status, content, header = self._http_request(api, 'POST', headers=headers, params=body) if not status: raise Exception(content) return content ''' disable debugger ''' def disable_eventing_debugger(self): authorization = self.get_authorization(self.username, self.password) url = "_p/event/api/v1/config" api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} body = "{\"enable_debugger\": false}" status, content, header = self._http_request(api, 'POST', headers=headers, params=body) if not status: raise Exception(content) return content ''' Start debugger ''' def start_eventing_debugger(self, name): authorization = self.get_authorization(self.username, self.password) url="/pools/default" api = self.baseUrl + url headers = 
{'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) url = "_p/event/startDebugger/?name=" + name api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=content) if not status: raise Exception(content) return content ''' Stop debugger ''' def stop_eventing_debugger(self, name): authorization = self.get_authorization(self.username, self.password) url = "_p/event/stopDebugger/?name=" + name api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers) if not status: raise Exception(content) return content ''' Get debugger url ''' def get_eventing_debugger_url(self, name): authorization = self.get_authorization(self.username, self.password) url = "_p/event/getDebuggerUrl/?name=" + name api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers) if not status: raise Exception(content) return content ''' allow inter bucket recursion ''' def allow_interbucket_recursion(self): authorization = self.get_authorization(self.username, self.password) url = "_p/event/api/v1/config" api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} body = "{\"allow_interbucket_recursion\": true}" status, content, header = self._http_request(api, 'POST', headers=headers, params=body) if not status: raise Exception(content) return content ''' update eventing config ''' def update_eventing_config(self,body): authorization = self.get_authorization(self.username, self.password) url = "_p/event/api/v1/config" api = self.baseUrl + url 
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=body) if not status: raise Exception(content) return content ''' GET eventing config ''' def get_eventing_config(self): authorization = self.get_authorization(self.username, self.password) url = "_p/event/api/v1/config" api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers, params='') if not status: raise Exception(content) return content ''' update eventing config function wise ''' def update_eventing_config_per_function(self, body, name): authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name + "/config" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) if not status: raise Exception(content) return content ''' GET eventing config for single function ''' def get_eventing_config_per_function(self, name): authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name + "/config" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers, params='') if not status: raise Exception(content) return content ''' Update function appcode ''' def update_function_appcode(self, body, name): authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name + "/appcode" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = 
self._http_request(api, 'POST', headers=headers, params=body) if not status: raise Exception(content) return content ''' Get function appcode ''' def get_function_appcode(self, name): authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name + "/appcode" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers, params='') if not status: raise Exception(content) return content ''' Get eventing rebalance status ''' def get_eventing_rebalance_status(self): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "getAggRebalanceStatus" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if status: return content ''' Get application logs ''' def get_app_logs(self,handler_name): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "getAppLog?aggregate=true&name="+handler_name api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if status: return content def create_function(self, name, body): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 
'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) if not status: raise Exception(content) return content def update_function(self, name, body): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} body['appname']=name status, content, header = self._http_request(api, 'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) if not status: raise Exception(content) return content def get_function_details(self, name): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) return content def get_eventing_go_routine_dumps(self): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "debug/pprof/goroutine?debug=1" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) return content def set_eventing_retry(self, name, body): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name + "/retry" api = self.eventing_baseUrl + url headers = 
{'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) if not status: raise Exception(content) return content def get_user(self, user_id): url = "settings/rbac/users/" api = self.baseUrl + url status, content, header = self._http_request(api, "GET") if content is not None: content_json = json.loads(content) for i in range(len(content_json)): user = content_json[i] if user.get('id') == user_id: return user return {} """ From 6.5.0, enable IPv6 on cluster/node needs 2 settings default is set to IPv6 We need to disable auto failover first, then set network version Then enable autofaiover again. """ def enable_ip_version(self, afamily='ipv6', afamilyOnly='false'): log.info("Start enable {0} on this node {1}".format(afamily, self.baseUrl)) self.update_autofailover_settings(False, 60) params = urllib.parse.urlencode({'afamily': afamily, 'afamilyOnly': afamilyOnly, 'nodeEncryption': 'off'}) api = "{0}node/controller/enableExternalListener".format(self.baseUrl) status, content, header = self._http_request(api, 'POST', params) if status: params = urllib.parse.urlencode({'afamily': afamily, 'afamilyOnly': afamilyOnly, 'nodeEncryption': 'off'}) api = "{0}node/controller/setupNetConfig".format(self.baseUrl) status, content, header = self._http_request(api, 'POST', params) if status: log.info("Done enable {0} on this node {1}".format(afamily, self.baseUrl)) else: log.error("Failed to set 'setupNetConfig' on this node {0}" .format(self.baseUrl)) raise Exception(content) else: log.error("Failed to set 'enableExternalListener' on this node {0}" .format(self.baseUrl)) raise Exception(content) if afamilyOnly == 'true': api = "{0}node/controller/disableUnusedExternalListeners".format(self.baseUrl) status, _, _ = self._http_request(api, 'POST', params) if not status: log.error("Failed to set 'disableUnusedExternalListeners' on this 
node {0}" .format(self.baseUrl)) self.update_autofailover_settings(True, 60) # These methods are added for Auto-Rebalance On Failure tests def set_retry_rebalance_settings(self, body): url = "settings/retryRebalance" api = self.baseUrl + url params = urllib.parse.urlencode(body) headers = self._create_headers() status, content, header = self._http_request(api, 'POST', headers=headers, params=params) if not status: raise Exception(content) return content def get_retry_rebalance_settings(self): authorization = self.get_authorization(self.username, self.password) url = "settings/retryRebalance" api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) return content def get_pending_rebalance_info(self): authorization = self.get_authorization(self.username, self.password) url = "pools/default/pendingRetryRebalance" api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) return content def cancel_pending_rebalance(self, id): authorization = self.get_authorization(self.username, self.password) url = "controller/cancelRebalanceRetry/" + str(id) api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers) if not status: raise Exception(content) return content # Upload a root certificate def upload_cluster_ca(self, certificate): """ Upload a certificate the cluster This can be a root certificate or an intermediate certificate. 
""" headers = self._create_capi_headers() headers['Content-Type'] = 'application/octet-stream' status, content, header = self._http_request(self.baseUrl + "controller/uploadClusterCA", 'POST', headers=headers, params=certificate) return status, content def load_trusted_CAs(self): """ Instructs the cluster to load trusted CAs(.pem files) from the node's inbox/CA folder """ status, content, header = self._http_request(self.baseUrl + "node/controller/loadTrustedCAs", 'POST') return status, content def reload_certificate(self, params=''): """ Reload certificate Call this function after uploading a certificate to the cluster to activate the new certificate. """ headers = self._create_capi_headers() status, content, header = self._http_request(self.baseUrl + "node/controller/reloadCertificate", 'POST', headers=headers, params=params) return status, content def get_trusted_CAs(self): """ Get all (default + uploaded) trusted CA certs information """ status, content, header = self._http_request(self.baseUrl + "/pools/default/trustedCAs", 'GET') return status, content def client_cert_auth(self, state, prefixes): """ Args: state (str): Either 'enable', 'mandatory' or 'disable'. prefixes (list(dict)): A list of dicts of containing the keys 'path', 'prefix' and 'delimiter' e.g. 
{"path": .., "prefix": .., "delimiter", ..} """ headers = self._create_capi_headers() params = json.dumps({'state': state, 'prefixes': prefixes}) status, content, header = self._http_request(self.baseUrl + "settings/clientCertAuth", 'POST', headers=headers, params=params) return status, content class MembaseServerVersion: def __init__(self, implementationVersion='', componentsVersion=''): self.implementationVersion = implementationVersion self.componentsVersion = componentsVersion # this class will also contain more node related info class OtpNode(object): def __init__(self, id='', status=''): self.id = id self.ip = '' self.replication = '' self.port = CbServer.port if CbServer.use_https: self.port = CbServer.ssl_port self.gracefulFailoverPossible = 'true' # extract ns ip from the otpNode string # its normally ns_1@10.20.30.40 if id.find('@') >= 0: self.ip = id[id.index('@') + 1:] if self.ip.count(':') > 0: # raw ipv6? enclose in square brackets self.ip = '[' + self.ip + ']' self.status = status class NodeInfo(object): def __init__(self): self.availableStorage = None # list self.memoryQuota = None class NodeDataStorage(object): def __init__(self): self.type = '' # hdd or ssd self.path = '' self.index_path = '' self.quotaMb = '' self.state = '' # ok def __str__(self): return '{0}'.format({'type': self.type, 'path': self.path, 'index_path' : self.index_path, 'quotaMb': self.quotaMb, 'state': self.state}) def get_data_path(self): return self.path def get_index_path(self): return self.index_path class NodeDiskStorage(object): def __init__(self): self.type = 0 self.path = '' self.sizeKBytes = 0 self.usagePercent = 0 class Bucket(object): def __init__(self, bucket_size='', name="", num_replicas=0, port=11211, master_id=None, type='', eviction_policy="valueOnly", bucket_priority=None, uuid="", lww=False, maxttl=None, bucket_storage=None): self.name = name self.port = port self.type = type self.nodes = None self.stats = None self.servers = [] self.vbuckets = [] 
self.forward_map = [] self.numReplicas = num_replicas self.bucket_size = bucket_size self.kvs = {1:KVStore()} self.master_id = master_id self.eviction_policy = eviction_policy self.bucket_priority = bucket_priority self.uuid = uuid self.lww = lww self.maxttl = maxttl self.bucket_storage = bucket_storage def __str__(self): return self.name class Node(object): def __init__(self): self.uptime = 0 self.memoryTotal = 0 self.memoryFree = 0 self.mcdMemoryReserved = 0 self.mcdMemoryAllocated = 0 self.status = "" self.hostname = "" self.clusterCompatibility = "" self.clusterMembership = "" self.recoveryType = "" self.version = "" self.os = "" self.ports = [] self.availableStorage = [] self.storage = [] self.memoryQuota = 0 self.moxi = 11211 self.memcached = 11210 self.id = "" self.ip = "" self.rest_username = "" self.rest_password = "" self.port = 8091 if CbServer.use_https: self.port = CbServer.ssl_port self.services = [] self.storageTotalRam = 0 @property def failed_over_state_a(self): """ The state in which a node is failed-over and is requesting a recovery type from the user """ return self.clusterMembership == "inactiveFailed" @property def failed_over_state_b(self): """ The state in which a node is failed-over and the user has selected a recovery type """ return self.clusterMembership == "inactiveAdded" and self.recoveryType @property def has_failed_over(self): """ Returns tree if a node is in the failed-over state """ return self.failed_over_state_a or self.failed_over_state_b @property def complete_version(self): """ Returns the complete version of the node (e.g. 6.5.0) """ return self.version.split('-')[0] @property def major_version(self): """ Returns the major version of the node (e.g. 6.5) """ return self.complete_version.rsplit('.', 1)[0] @property def minor_version(self): """ Returns the minor version of the node (e.g. 
0) """ return self.complete_version.rsplit('.', 1)[1] class AutoFailoverSettings(object): def __init__(self): self.enabled = True self.timeout = 0 self.count = 0 self.failoverOnDataDiskIssuesEnabled = False self.failoverOnDataDiskIssuesTimeout = 0 self.maxCount = 1 self.failoverServerGroup = False self.can_abort_rebalance = False class AutoReprovisionSettings(object): def __init__(self): self.enabled = True self.max_nodes = 0 self.count = 0 class NodePort(object): def __init__(self): self.proxy = 0 self.direct = 0 class BucketStats(object): def __init__(self): self.opsPerSec = 0 self.itemCount = 0 self.diskUsed = 0 self.memUsed = 0 self.ram = 0 class vBucket(object): def __init__(self): self.master = '' self.replica = [] self.id = -1 class RestParser(object): def parse_index_status_response(self, parsed): index_map = {} for map in parsed["indexes"]: bucket_name = map['bucket'] if bucket_name not in list(index_map.keys()): index_map[bucket_name] = {} index_name = map['index'] index_map[bucket_name][index_name] = {} index_map[bucket_name][index_name]['status'] = map['status'] index_map[bucket_name][index_name]['progress'] = str(map['progress']) index_map[bucket_name][index_name]['definition'] = map['definition'] if len(map['hosts']) == 1: index_map[bucket_name][index_name]['hosts'] = map['hosts'][0] else: index_map[bucket_name][index_name]['hosts'] = map['hosts'] index_map[bucket_name][index_name]['id'] = map['id'] return index_map def parse_index_stats_response(self, parsed, index_map=None): if index_map == None: index_map = {} for key in list(parsed.keys()): tokens = key.split(":") val = parsed[key] if len(tokens) == 3 and 'MAINT_STREAM' not in tokens[0] and 'INIT_STREAM' not in tokens[0]: bucket = tokens[0] index_name = tokens[1] stats_name = tokens[2] if bucket not in list(index_map.keys()): index_map[bucket] = {} if index_name not in list(index_map[bucket].keys()): index_map[bucket][index_name] = {} index_map[bucket][index_name][stats_name] = val return 
index_map def parse_index_stats_response_collections(self, parsed, index_map=None): if index_map == None: index_map = {} for key in list(parsed.keys()): tokens = key.split(":") val = parsed[key] if len(tokens) == 3 and 'MAINT_STREAM' not in tokens[0] and 'INIT_STREAM' not in tokens[0]: bucket = tokens[0] index_name = tokens[1] stats_name = tokens[2] if bucket not in list(index_map.keys()): index_map[bucket] = {} if index_name not in list(index_map[bucket].keys()): index_map[bucket][index_name] = {} index_map[bucket][index_name][stats_name] = val elif len(tokens) == 5 and 'MAINT_STREAM' not in tokens[0] and 'INIT_STREAM' not in tokens[0]: bucket = tokens[0] scope_name = tokens[1] collection_name = tokens[2] index_name = tokens[3] stats_name = tokens[4] keyspace = f'default:{bucket}.{scope_name}.{collection_name}' if keyspace not in list(index_map.keys()): index_map[keyspace] = {} if index_name not in list(index_map[keyspace].keys()): index_map[keyspace][index_name] = {} index_map[keyspace][index_name][stats_name] = val return index_map def parse_get_nodes_response(self, parsed): node = Node() node.uptime = parsed['uptime'] node.memoryFree = parsed['memoryFree'] node.memoryTotal = parsed['memoryTotal'] node.mcdMemoryAllocated = parsed['mcdMemoryAllocated'] node.mcdMemoryReserved = parsed['mcdMemoryReserved'] node.status = parsed['status'] node.hostname = parsed['hostname'] node.clusterCompatibility = parsed['clusterCompatibility'] node.clusterMembership = parsed['clusterMembership'] if 'recoveryType' in parsed: node.recoveryType = parsed['recoveryType'] node.version = parsed['version'] node.curr_items = 0 if 'interestingStats' in parsed and 'curr_items' in parsed['interestingStats']: node.curr_items = parsed['interestingStats']['curr_items'] node.port = parsed["hostname"][parsed["hostname"].rfind(":") + 1:] if CbServer.use_https: str_node_port = CbServer.ssl_port_map.get(str(node.port), str(node.port)) if type(node.port) == int: node.port = int(str_node_port) node.os 
= parsed['os'] if "services" in parsed: node.services = parsed["services"] if "otpNode" in parsed: node.id = parsed["otpNode"] if "hostname" in parsed: # should work for both: ipv4 and ipv6 node.ip = parsed["hostname"].rsplit(":", 1)[0] # memoryQuota if 'memoryQuota' in parsed: node.memoryQuota = parsed['memoryQuota'] if 'availableStorage' in parsed: availableStorage = parsed['availableStorage'] for key in availableStorage: # let's assume there is only one disk in each noce dict_parsed = parsed['availableStorage'] if 'path' in dict_parsed and 'sizeKBytes' in dict_parsed and 'usagePercent' in dict_parsed: diskStorage = NodeDiskStorage() diskStorage.path = dict_parsed['path'] diskStorage.sizeKBytes = dict_parsed['sizeKBytes'] diskStorage.type = key diskStorage.usagePercent = dict_parsed['usagePercent'] node.availableStorage.append(diskStorage) log.info(diskStorage) if 'storage' in parsed: storage = parsed['storage'] for key in storage: disk_storage_list = storage[key] for dict_parsed in disk_storage_list: if 'path' in dict_parsed and 'state' in dict_parsed and 'quotaMb' in dict_parsed: dataStorage = NodeDataStorage() dataStorage.path = dict_parsed['path'] dataStorage.index_path = dict_parsed.get('index_path', '') dataStorage.quotaMb = dict_parsed['quotaMb'] dataStorage.state = dict_parsed['state'] dataStorage.type = key node.storage.append(dataStorage) # ports":{"proxy":11211,"direct":11210} if "ports" in parsed: ports = parsed["ports"] if "proxy" in ports: node.moxi = ports["proxy"] if "direct" in ports: node.memcached = ports["direct"] if CbServer.use_https: node.memcached = int(CbServer.ssl_port_map.get(str(node.memcached), str(node.memcached))) if "storageTotals" in parsed: storageTotals = parsed["storageTotals"] if storageTotals.get("ram"): if storageTotals["ram"].get("total"): ramKB = storageTotals["ram"]["total"] node.storageTotalRam = ramKB//(1024*1024) if node.mcdMemoryReserved == 0: node.mcdMemoryReserved = node.storageTotalRam if IS_CONTAINER: # the 
storage total values are more accurate than # mcdMemoryReserved - which is container host memory node.mcdMemoryReserved = node.storageTotalRam * 0.70 return node def parse_get_bucket_response(self, response): parsed = json.loads(response) return self.parse_get_bucket_json(parsed) def parse_get_bucket_json(self, parsed): bucket = Bucket() bucket.name = parsed['name'] bucket.uuid = parsed['uuid'] bucket.type = parsed['bucketType'] if 'proxyPort' in parsed: bucket.port = parsed['proxyPort'] bucket.nodes = list() if 'vBucketServerMap' in parsed: vBucketServerMap = parsed['vBucketServerMap'] serverList = vBucketServerMap['serverList'] bucket.servers.extend(serverList) if "numReplicas" in vBucketServerMap: bucket.numReplicas = vBucketServerMap["numReplicas"] # vBucketMapForward if 'vBucketMapForward' in vBucketServerMap: # let's gather the forward map vBucketMapForward = vBucketServerMap['vBucketMapForward'] counter = 0 for vbucket in vBucketMapForward: # there will be n number of replicas vbucketInfo = vBucket() vbucketInfo.master = serverList[vbucket[0]] if vbucket: for i in range(1, len(vbucket)): if vbucket[i] != -1: vbucketInfo.replica.append(serverList[vbucket[i]]) vbucketInfo.id = counter counter += 1 bucket.forward_map.append(vbucketInfo) vBucketMap = vBucketServerMap['vBucketMap'] counter = 0 for vbucket in vBucketMap: # there will be n number of replicas vbucketInfo = vBucket() vbucketInfo.master = serverList[vbucket[0]] if vbucket: for i in range(1, len(vbucket)): if vbucket[i] != -1: vbucketInfo.replica.append(serverList[vbucket[i]]) vbucketInfo.id = counter counter += 1 bucket.vbuckets.append(vbucketInfo) # now go through each vbucket and populate the info # who is master , who is replica # get the 'storageTotals' log.debug('read {0} vbuckets'.format(len(bucket.vbuckets))) stats = parsed['basicStats'] # vBucketServerMap bucketStats = BucketStats() log.debug('stats:{0}'.format(stats)) bucketStats.opsPerSec = stats['opsPerSec'] bucketStats.itemCount = 
stats['itemCount'] if bucket.type != "memcached": bucketStats.diskUsed = stats['diskUsed'] bucketStats.memUsed = stats['memUsed'] quota = parsed['quota'] bucketStats.ram = quota['ram'] bucket.stats = bucketStats nodes = parsed['nodes'] for nodeDictionary in nodes: node = Node() node.uptime = nodeDictionary['uptime'] node.memoryFree = nodeDictionary['memoryFree'] node.memoryTotal = nodeDictionary['memoryTotal'] node.mcdMemoryAllocated = nodeDictionary['mcdMemoryAllocated'] node.mcdMemoryReserved = nodeDictionary['mcdMemoryReserved'] node.status = nodeDictionary['status'] node.hostname = nodeDictionary['hostname'] if 'clusterCompatibility' in nodeDictionary: node.clusterCompatibility = nodeDictionary['clusterCompatibility'] if 'clusterMembership' in nodeDictionary: node.clusterCompatibility = nodeDictionary['clusterMembership'] node.version = nodeDictionary['version'] node.os = nodeDictionary['os'] if "ports" in nodeDictionary: ports = nodeDictionary["ports"] if "proxy" in ports: node.moxi = ports["proxy"] if "direct" in ports: node.memcached = ports["direct"] if CbServer.use_https: node.memcached = int(CbServer.ssl_port_map.get(str(node.memcached), str(node.memcached))) if "hostname" in nodeDictionary: value = str(nodeDictionary["hostname"]) node.ip = value[:value.rfind(":")] node.port = int(value[value.rfind(":") + 1:]) if CbServer.use_https: node.port = int(CbServer.ssl_port_map.get(str(node.port), str(node.port))) if "otpNode" in nodeDictionary: node.id = nodeDictionary["otpNode"] bucket.nodes.append(node) return bucket
import base64
import json
import urllib.request, urllib.parse, urllib.error
from urllib3._collections import HTTPHeaderDict
from . import httplib2
import logger
import traceback
import socket
import time
import re
import uuid
from copy import deepcopy
from threading import Thread
from TestInput import TestInputSingleton
from TestInput import TestInputServer
from testconstants import MIN_KV_QUOTA, INDEX_QUOTA, FTS_QUOTA, CBAS_QUOTA
from testconstants import COUCHBASE_FROM_VERSION_4, IS_CONTAINER, CLUSTER_QUOTA_RATIO
from lib.Cb_constants.CBServer import CbServer
try:
    from couchbase_helper.document import DesignDocument, View
except ImportError:
    from lib.couchbase_helper.document import DesignDocument, View
from memcached.helper.kvstore import KVStore
from .exception import ServerAlreadyJoinedException, ServerUnavailableException, InvalidArgumentException
from membase.api.exception import BucketCreationException, ServerSelfJoinException, ClusterRemoteException, \
    RebalanceFailedException, FailoverFailedException, DesignDocCreationException, QueryViewException, \
    ReadDocumentException, GetBucketInfoFailed, CompactViewFailed, SetViewInfoNotFound, AddNodeException, \
    BucketFlushFailed, CBRecoveryFailedException, XDCRException, SetRecoveryTypeFailed, BucketCompactionException

log = logger.Logger.get_logger()


# helper library methods built on top of RestConnection interface
class RestHelper(object):
    def __init__(self, rest_connection):
        self.rest = rest_connection

    def is_ns_server_running(self, timeout_in_seconds=360):
        """Poll nodes/self until the node reports 'healthy' or the timeout expires."""
        log.info("-->is_ns_server_running?")
        end_time = time.time() + timeout_in_seconds
        while time.time() <= end_time:
            try:
                status = self.rest.get_nodes_self(5)
                if status is not None and status.status == 'healthy':
                    return True
                else:
                    if status is not None:
                        log.warn("server {0}:{1} status is {2}"\
                            .format(self.rest.ip, self.rest.port, status.status))
                    else:
                        log.warn("server {0}:{1} status is down"\
                                 .format(self.rest.ip, self.rest.port))
            except ServerUnavailableException:
                log.error("server {0}:{1} is unavailable"\
                          .format(self.rest.ip, self.rest.port))
            time.sleep(5)
        msg = 'unable to connect to the node {0} even after waiting {1} seconds'
        log.error(msg.format(self.rest.ip, timeout_in_seconds))
        return False

    def is_cluster_healthy(self, timeout=120):
        # get the nodes and verify that all the nodes.status are healthy
        nodes = self.rest.node_statuses(timeout)
        return all(node.status == 'healthy' for node in nodes)

    def rebalance_reached(self, percentage=100, retry_count=40):
        """Wait until rebalance progress reaches *percentage*; False on error/stall."""
        start = time.time()
        progress = 0
        previous_progress = 0
        retry = 0
        while progress != -1 and progress < percentage and retry < retry_count:
            # -1 is error , -100 means could not retrieve progress
            progress = self.rest._rebalance_progress()
            if progress == -100:
                log.error("unable to retrieve rebalanceProgress.try again in 2 seconds")
                retry += 1
            else:
                if previous_progress == progress:
                    retry += 0.5
                else:
                    retry = 0
                    previous_progress = progress
            # sleep before polling progress again
            time.sleep(3)
        if progress <= 0:
            log.error("rebalance progress code : {0}".format(progress))
            return False
        elif retry >= retry_count:
            log.error("rebalance stuck on {0}%".format(progress))
            return False
        else:
            duration = time.time() - start
            log.info('rebalance reached >{0}% in {1} seconds '.format(progress, duration))
            return True

    # return true if cluster balanced, false if it needs rebalance
    def is_cluster_rebalanced(self):
        command = "ns_orchestrator:needs_rebalance()"
        status, content = self.rest.diag_eval(command)
        if status:
            return content.lower() == "false"
        log.error("can't define if cluster balanced")
        return None

    # this method will rebalance the cluster by passing the remote_node as
    # ejected node
    def remove_nodes(self, knownNodes, ejectedNodes, wait_for_rebalance=True):
        if len(ejectedNodes) == 0:
            return False
        self.rest.rebalance(knownNodes, ejectedNodes)
        if wait_for_rebalance:
            return self.rest.monitorRebalance()
        else:
            return False

    def vbucket_map_ready(self, bucket, timeout_in_seconds=360):
        """Poll until the bucket's vbucket map is populated; False on timeout."""
        end_time = time.time() + timeout_in_seconds
        while time.time() <= end_time:
            vBuckets = self.rest.get_vbuckets(bucket)
            if vBuckets:
                return True
            else:
                time.sleep(0.5)
        msg = 'vbucket map is not ready for bucket {0} after waiting {1} seconds'
        log.info(msg.format(bucket, timeout_in_seconds))
        return False

    def bucket_exists(self, bucket):
        try:
            buckets = self.rest.get_buckets()
            names = [item.name for item in buckets]
            log.info("node {1} existing buckets : {0}" \
                     .format(names, self.rest.ip))
            for item in buckets:
                if item.name == bucket:
                    log.info("node {1} found bucket {0}" \
                             .format(bucket, self.rest.ip))
                    return True
            return False
        except Exception:
            return False

    def wait_for_node_status(self, node, expected_status, timeout_in_seconds):
        status_reached = False
        end_time = time.time() + timeout_in_seconds
        while time.time() <= end_time and not status_reached:
            nodes = self.rest.node_statuses()
            for n in nodes:
                if node.id == n.id:
                    log.info('node {0} status : {1}'.format(node.id, n.status))
                    if n.status.lower() == expected_status.lower():
                        status_reached = True
                    break
            if not status_reached:
                log.info("sleep for 5 seconds before reading the node.status again")
                time.sleep(5)
        log.info('node {0} status_reached : {1}'.format(node.id, status_reached))
        return status_reached

    def _wait_for_task_pid(self, pid, end_time, ddoc_name):
        # Block (polling every 5s) while the indexer task keeps the same pid.
        while (time.time() < end_time):
            new_pid, _ = self.rest._get_indexer_task_pid(ddoc_name)
            if pid == new_pid:
                time.sleep(5)
                continue
            else:
                return

    def _wait_for_indexer_ddoc(self, servers, ddoc_name, timeout=300):
        """Wait (up to *timeout* s per server) for the indexer task on *ddoc_name*
        to unblock and finish on every matching server."""
        nodes = self.rest.get_nodes()
        servers_to_check = []
        for node in nodes:
            for server in servers:
                if node.ip == server.ip and str(node.port) == str(server.port):
                    servers_to_check.append(server)
        for server in servers_to_check:
            try:
                rest = RestConnection(server)
                log.info('Check index for ddoc %s , server %s' % (ddoc_name, server.ip))
                end_time = time.time() + timeout
                log.info('Start getting index for ddoc %s , server %s' % (ddoc_name, server.ip))
                old_pid, is_pid_blocked = rest._get_indexer_task_pid(ddoc_name)
                if not old_pid:
                    log.info('Index for ddoc %s is not going on, server %s' % (ddoc_name, server.ip))
                    continue
                while is_pid_blocked:
                    log.info('Index for ddoc %s is blocked, server %s' % (ddoc_name, server.ip))
                    self._wait_for_task_pid(old_pid, end_time, ddoc_name)
                    old_pid, is_pid_blocked = rest._get_indexer_task_pid(ddoc_name)
                    if time.time() > end_time:
                        # FIX: format string was "ddoc % pid %" which raises
                        # ValueError ('unsupported format character') at runtime.
                        log.error("INDEX IS STILL BLOCKED node %s ddoc %s pid %s"
                                  % (server, ddoc_name, old_pid))
                        break
                if old_pid:
                    log.info('Index for ddoc %s is running, server %s' % (ddoc_name, server.ip))
                    self._wait_for_task_pid(old_pid, end_time, ddoc_name)
            except Exception as ex:
                log.error('unable to check index on server %s because of %s' % (server.ip, str(ex)))

    def _get_vbuckets(self, servers, bucket_name='default'):
        """Map each server -> {'active_vb': [...], 'replica_vb': [...]} for *bucket_name*."""
        vbuckets_servers = {}
        for server in servers:
            buckets = RestConnection(server).get_buckets()
            if not buckets:
                return vbuckets_servers
            if bucket_name:
                bucket_to_check = [bucket for bucket in buckets
                                   if bucket.name == bucket_name][0]
            else:
                bucket_to_check = [bucket for bucket in buckets][0]
            vbuckets_servers[server] = {}
            vbs_active = [vb.id for vb in bucket_to_check.vbuckets
                          if vb.master.startswith(str(server.ip))]
            vbs_replica = []
            for replica_num in range(0, bucket_to_check.numReplicas):
                # NOTE(review): 'replica_num in vb.replica' is a membership test on
                # the replica list, not a bounds check on the index used next line —
                # looks suspicious, verify intent before changing.
                vbs_replica.extend([vb.id for vb in bucket_to_check.vbuckets
                                    if replica_num in vb.replica
                                    and vb.replica[replica_num].startswith(str(server.ip))])
            vbuckets_servers[server]['active_vb'] = vbs_active
            vbuckets_servers[server]['replica_vb'] = vbs_replica
        return vbuckets_servers


class RestConnection(object):

    def __new__(cls, serverInfo={}):
        # allow port to determine
        # behavior of restconnection
        port = None
        if isinstance(serverInfo, dict):
            if 'port' in serverInfo:
                port = serverInfo['port']
        else:
            port = serverInfo.port
        if not port:
            port = CbServer.port
            if CbServer.use_https:
                port = CbServer.ssl_port
        if int(port) in range(9091, 9100):
            # return elastic search rest connection
            from membase.api.esrest_client import EsRestConnection
            obj = super(EsRestConnection, cls).__new__(cls)
        else:
            # default
            obj = object.__new__(cls)
        return obj

    def __init__(self, serverInfo):
        # serverInfo can be a json object/dictionary
        if isinstance(serverInfo, dict):
            self.ip = serverInfo["ip"]
            self.username = serverInfo["username"]
            self.password = serverInfo["password"]
            self.port = serverInfo["port"]
            self.index_port = CbServer.index_port
            self.fts_port = CbServer.fts_port
            self.query_port = CbServer.n1ql_port
            self.eventing_port = CbServer.eventing_port
            self.capi_port = CbServer.capi_port
            if "index_port" in list(serverInfo.keys()):
                self.index_port = serverInfo["index_port"]
            if "fts_port" in list(serverInfo.keys()):
                if serverInfo['fts_port']:
                    self.fts_port = serverInfo["fts_port"]
            if "eventing_port" in list(serverInfo.keys()):
                if serverInfo['eventing_port']:
                    self.eventing_port = serverInfo["eventing_port"]
            self.hostname = ''
            self.services = ''
            if "hostname" in serverInfo:
                self.hostname = serverInfo["hostname"]
            if "services" in serverInfo:
                self.services = serverInfo["services"]
        else:
            self.ip = serverInfo.ip
            self.username = serverInfo.rest_username
            self.password = serverInfo.rest_password
            self.port = serverInfo.port
            self.hostname = ''
            self.index_port = CbServer.index_port
            self.fts_port = CbServer.fts_port
            self.query_port = CbServer.n1ql_port
            self.eventing_port = CbServer.eventing_port
            self.capi_port = CbServer.capi_port
            self.services = "kv"
            self.debug_logs = False
            if hasattr(serverInfo, "services"):
                self.services = serverInfo.services
            if hasattr(serverInfo, 'index_port'):
                self.index_port = serverInfo.index_port
            if hasattr(serverInfo, 'query_port'):
                self.query_port = serverInfo.query_port
            if hasattr(serverInfo, 'fts_port'):
                if serverInfo.fts_port:
                    self.fts_port = serverInfo.fts_port
            if hasattr(serverInfo, 'eventing_port'):
                if serverInfo.eventing_port:
                    self.eventing_port = serverInfo.eventing_port
            if hasattr(serverInfo, 'hostname') and serverInfo.hostname and\
               serverInfo.hostname.find(self.ip) == -1:
                self.hostname = serverInfo.hostname
            if hasattr(serverInfo, 'services'):
                self.services = serverInfo.services
        self.input = TestInputSingleton.input
        if self.input is not None:
            """ from watson, services param order and format:
                new_services=fts-kv-index-n1ql """
            self.services_node_init = self.input.param("new_services", None)
            self.debug_logs = self.input.param("debug-logs", False)
            self.eventing_role = self.input.param('eventing_role', False)
        if CbServer.use_https:
            self.port = CbServer.ssl_port_map.get(str(self.port),
                                                  str(self.port))
            self.index_port = CbServer.ssl_port_map.get(str(self.index_port),
                                                        str(self.index_port))
            self.query_port = CbServer.ssl_port_map.get(str(self.query_port),
                                                        str(self.query_port))
            self.fts_port = CbServer.ssl_port_map.get(str(self.fts_port),
                                                      str(self.fts_port))
            self.eventing_port = CbServer.ssl_port_map.get(str(self.eventing_port),
                                                           str(self.eventing_port))
            self.capi_port = CbServer.ssl_port_map.get(str(self.capi_port),
                                                       str(self.capi_port))
        http_url = "http://%s:%s/"
        https_url = "https://%s:%s/"
        generic_url = http_url
        if CbServer.use_https:
            generic_url = https_url
        url_host = "%s" % self.ip
        if self.hostname:
            url_host = "%s" % self.hostname
        self.baseUrl = generic_url % (url_host, self.port)
        self.fts_baseUrl = generic_url % (url_host, self.fts_port)
        self.index_baseUrl = generic_url % (url_host, self.index_port)
        self.query_baseUrl = generic_url % (url_host, self.query_port)
        self.capiBaseUrl = generic_url % (url_host, self.capi_port)
        self.eventing_baseUrl = generic_url % (url_host, self.eventing_port)
        # Initialization of CBAS related params
        self.cbas_ip = self.ip
        self.cbas_port = CbServer.cbas_port
        if hasattr(self.input, 'cbas'):
            if self.input.cbas:
                self.cbas_node = self.input.cbas
                if hasattr(self.cbas_node, 'port'):
                    self.cbas_port = self.cbas_node.port
                if hasattr(self.cbas_node, 'ip'):
                    self.cbas_ip = self.cbas_node.ip
        if CbServer.use_https:
            self.cbas_port = CbServer.ssl_cbas_port
        self.cbas_base_url = generic_url % (self.cbas_ip, self.cbas_port)
        self.cbas_base_url = self.cbas_base_url[:-1]
        # for Node is unknown to this cluster error
        for iteration in range(5):
            http_res, success = self.init_http_request(api=self.baseUrl + "nodes/self")
            if not success and isinstance(http_res, str) and\
               (http_res.find('Node is unknown to this cluster') > -1 or \
                http_res.find('Unexpected server error, request logged') > -1):
                log.error("Error {0} was gotten, 5 seconds sleep before retry"\
                          .format(http_res))
                time.sleep(5)
                if iteration == 2:
                    log.error("node {0}:{1} is in a broken state!"\
                              .format(self.ip, self.port))
                    raise ServerUnavailableException(self.ip)
                continue
            else:
                break
        # determine the real couchApiBase for cluster_run
        # couchApiBase appeared in version 2.*
        if isinstance(http_res, dict):
            if not http_res or http_res["version"][0:2] == "1.":
                self.capiBaseUrl = self.baseUrl + "/couchBase"
            else:
                for iteration in range(5):
                    if "couchApiBase" not in http_res.keys():
                        if self.is_cluster_mixed():
                            self.capiBaseUrl = self.baseUrl + "/couchBase"
                            return
                        time.sleep(0.2)
                        http_res, success = self.init_http_request(self.baseUrl + 'nodes/self')
                    else:
                        if CbServer.use_https:
                            self.capiBaseUrl = http_res["couchApiBaseHTTPS"]
                        else:
                            self.capiBaseUrl = http_res["couchApiBase"]
                        return
                raise ServerUnavailableException("couchApiBase doesn't exist in nodes/self: %s " % http_res)

    def sasl_streaming_rq(self, bucket, timeout=120,
                          disable_ssl_certificate_validation=True):
        """Issue (and ignore the result of) a bucketsStreaming GET for *bucket*."""
        api = self.baseUrl + 'pools/default/bucketsStreaming/{0}'.format(bucket)
        if isinstance(bucket, Bucket):
            api = self.baseUrl + 'pools/default/bucketsStreaming/{0}'.format(bucket.name)
        try:
            httplib2.Http(timeout=timeout,
                          disable_ssl_certificate_validation=disable_ssl_certificate_validation).\
                request(api, 'GET', '', headers=self._create_capi_headers())
        except Exception as ex:
            log.warn('Exception while streaming: %s' % str(ex))

    def open_sasl_streaming_connection(self, bucket, timeout=1000):
        """Start sasl_streaming_rq in a daemon-less Thread; returns the Thread or None."""
        if self.debug_logs:
            log.info("Opening sasl streaming connection for bucket {0}"\
                     .format((bucket, bucket.name)[isinstance(bucket, Bucket)]))
        t = Thread(target=self.sasl_streaming_rq,
                   name="streaming_" + str(uuid.uuid4())[:4],
                   args=(bucket, timeout))
        try:
            t.start()
        except:
            log.warn("thread is not started")
            return None
        return t

    def is_cluster_mixed(self, timeout=120):
        """True when the cluster has both 1.x and 2.x nodes."""
        http_res, success = self.init_http_request(self.baseUrl + 'pools/default',
                                                   timeout=timeout)
        if http_res == 'unknown pool':
            return False
        try:
            versions = list({node["version"][:1] for node in http_res["nodes"]})
        except:
            log.error('Error while processing cluster info {0}'.format(http_res))
            # not really clear what to return but False see to be a good start until we figure what is happening
            return False
        if '1' in versions and '2' in versions:
            return True
        return False

    def is_cluster_compat_mode_greater_than(self, version):
        """
        curl -v -X POST -u Administrator:welcome http://10.3.4.186:8091/diag/eval
        -d 'cluster_compat_mode:get_compat_version().'
        Returns : [3,2] if version = 3.2.0
        """
        status, content = self.diag_eval('cluster_compat_mode:get_compat_version().')
        if status:
            json_parsed = json.loads(content)
            cluster_ver = float("%s.%s" % (json_parsed[0], json_parsed[1]))
            if cluster_ver > version:
                return True
        return False

    def is_enterprise_edition(self):
        """False when any node in the cluster runs a community build."""
        http_res, success = self.init_http_request(self.baseUrl + 'pools/default')
        if http_res == 'unknown pool':
            return False
        editions = []
        community_nodes = []
        """ get the last word in node["version"] as in "version": "2.5.1-1073-rel-enterprise" """
        for node in http_res["nodes"]:
            editions.extend(node["version"].split("-")[-1:])
            if "community" in node["version"].split("-")[-1:]:
                community_nodes.extend(node["hostname"].split(":")[:1])
        if "community" in editions:
            log.error("IP(s) for node(s) with community edition {0}".format(community_nodes))
            return False
        return True

    def init_http_request(self, api, timeout=120):
        """GET *api* and return (parsed_json, success_flag); (raw, False) on bad JSON."""
        content = None
        try:
            headers = self._create_capi_headers()
            status, content, header = self._http_request(api, 'GET',
                                                         headers=headers,
                                                         timeout=timeout)
            json_parsed = json.loads(content)
            if status:
                return json_parsed, True
            else:
                print("{0} with status {1}: {2}".format(api, status, json_parsed))
                return json_parsed, False
        except ValueError as e:
            if content is not None:
                print("{0}: {1}".format(api, content))
            else:
                print(e)
            return content, False

    def rename_node(self, hostname, username='Administrator', password='password'):
        params = urllib.parse.urlencode({'username': username,
                                         'password': password,
                                         'hostname': hostname})
        api = "%snode/controller/rename" % self.baseUrl
        status, content, header = self._http_request(api, 'POST', params)
        return status, content

    def active_tasks(self):
        """Return the parsed pools/default/tasks list, or '' on a JSON error."""
        api = self.baseUrl + "pools/default/tasks"
        try:
            status, content, header = self._http_request(api, 'GET',
                                                         headers=self._create_capi_headers())
            json_parsed = json.loads(content)
        except ValueError as e:
            print(e)
            return ""
        return json_parsed

    def ns_server_tasks(self):
        """Like active_tasks() but retries up to 3 times on JSON decode failure."""
        api = self.baseUrl + 'pools/default/tasks'
        retries = 3
        while retries:
            try:
                status, content, header = self._http_request(api, 'GET',
                                                             headers=self._create_headers())
                return json.loads(content)
            except ValueError:
                time.sleep(10)
                retries -= 1
        return ""

    # DEPRECATED: use create_ddoc() instead.
def create_view(self, design_doc_name, bucket_name, views, options=None): return self.create_ddoc(design_doc_name, bucket_name, views, options) def create_ddoc(self, design_doc_name, bucket, views, options=None): design_doc = DesignDocument(design_doc_name, views, options=options) if design_doc.name.find('/') != -1: design_doc.name = design_doc.name.replace('/', '%2f') design_doc.id = '_design/{0}'.format(design_doc.name) return self.create_design_document(bucket, design_doc) def create_design_document(self, bucket, design_doc): log.info("-->create_design_document") try: design_doc_name = design_doc.id api = '%s/%s/%s' % (self.capiBaseUrl, bucket, design_doc_name) if isinstance(bucket, Bucket): api = '%s/%s/%s' % (self.capiBaseUrl, bucket.name, design_doc_name) status, content, header = self._http_request(api, 'PUT', str(design_doc), headers=self._create_capi_headers()) except Exception as e: traceback.print_exc() if not status: raise DesignDocCreationException(design_doc_name, content) return json.loads(content.decode()) def is_index_triggered(self, ddoc_name, index_type='main'): run, block = self._get_indexer_task_pid(ddoc_name, index_type=index_type) if run or block: return True else: return False def _get_indexer_task_pid(self, ddoc_name, index_type='main'): active_tasks = self.active_tasks() if 'error' in active_tasks: return None if active_tasks: for task in active_tasks: if task['type'] == 'indexer' and task['indexer_type'] == index_type: for ddoc in task['design_documents']: if ddoc == ('_design/%s' % ddoc_name): return task['pid'], False if task['type'] == 'blocked_indexer' and task['indexer_type'] == index_type: for ddoc in task['design_documents']: if ddoc == ('_design/%s' % ddoc_name): return task['pid'], True return None, None def query_view(self, design_doc_name, view_name, bucket, query, timeout=120, invalid_query=False, type="view"): status, content, header = self._query(design_doc_name, view_name, bucket, type, query, timeout) if not status and not 
invalid_query: stat = 0 if 'status' in header: stat = int(header['status']) raise QueryViewException(view_name, content, status=stat) return json.loads(content) def _query(self, design_doc_name, view_name, bucket, view_type, query, timeout): if design_doc_name.find('/') != -1: design_doc_name = design_doc_name.replace('/', '%2f') if view_name.find('/') != -1: view_name = view_name.replace('/', '%2f') api = self.capiBaseUrl + '%s/_design/%s/_%s/%s?%s' % (bucket, design_doc_name, view_type, view_name, urllib.parse.urlencode(query)) if isinstance(bucket, Bucket): api = self.capiBaseUrl + '%s/_design/%s/_%s/%s?%s' % (bucket.name, design_doc_name, view_type, view_name, urllib.parse.urlencode(query)) log.info("index query url: {0}".format(api)) status, content, header = self._http_request(api, headers=self._create_capi_headers(), timeout=timeout) return status, content, header def view_results(self, bucket, ddoc_name, params, limit=100, timeout=120, view_name=None): status, json = self._index_results(bucket, "view", ddoc_name, params, limit, timeout=timeout, view_name=view_name) if not status: raise Exception("unable to obtain view results") return json # DEPRECATED: Incorrectly named function kept for backwards compatibility. def get_view(self, bucket, view): log.info("DEPRECATED function get_view(" + view + "). 
use get_ddoc()") return self.get_ddoc(bucket, view) def get_data_path(self): node_info = self.get_nodes_self() data_path = node_info.storage[0].get_data_path() return data_path def get_index_path(self): node_info = self.get_nodes_self() data_path = node_info.storage[0].get_index_path() return data_path def get_memcached_port(self): node_info = self.get_nodes_self() return node_info.memcached def get_ddoc(self, bucket, ddoc_name): status, json, meta = self._get_design_doc(bucket, ddoc_name) if not status: raise ReadDocumentException(ddoc_name, json) return json, meta # the same as Preview a Random Document on UI def get_random_key(self, bucket): api = self.baseUrl + 'pools/default/buckets/%s/localRandomKey' % bucket status, content, header = self._http_request(api, headers=self._create_capi_headers()) json_parsed = json.loads(content) if not status: raise Exception("unable to get random document/key for bucket %s" % bucket) return json_parsed def create_scope(self, bucket, scope, params=None, num_retries=3): api = self.baseUrl + 'pools/default/buckets/%s/scopes' % bucket body = {'name': scope} if params: body.update(params) params = urllib.parse.urlencode(body) headers = self._create_headers() while num_retries > 0: status, content, header = self._http_request(api, 'POST', params=params, headers=headers) log.info("{0} with params: {1}".format(api, params)) if status: json_parsed = json.loads(content) log.info("Scope created {}->{} {}".format(bucket, scope, json_parsed)) break elif header["status"] == "400": log.info("Scope already exists. 
Skipping create {}->{}".format(bucket, scope)) break else: time.sleep(10) num_retries -= 1 else: raise Exception("Create scope failed : status:{0},content:{1}".format(status, content)) return status def _create_single_collection(self, bucket, scope, collection, params=None): api = self.baseUrl + 'pools/default/buckets/%s/scopes/%s/collections' % (bucket, scope) body = {'name': collection} if params: body.update(params) params = urllib.parse.urlencode(body) headers = self._create_headers() status, content, header = self._http_request(api, 'POST', params=params, headers=headers) log.info("{0} with params: {1}".format(api, params)) return status,content,header def create_collection(self, bucket, scope, collection, params=None, num_retries=3): if not isinstance(collection, list): collection = [collection] for c in collection: while num_retries > 0: status, content, header = self._create_single_collection(bucket, scope, c, params) if status: json_parsed = json.loads(content) log.info("Collection created {}->{}->{} manifest:{}".format(bucket, scope, c, json_parsed)) break elif header["status"] == "400": log.info("Collection already exists. Skipping create {}->{}-{}".format(bucket, scope, c)) break else: time.sleep(10) num_retries -= 1 else: raise Exception("Create collection failed : status:{0},content:{1}".format(status, content)) return status def put_collection_scope_manifest(self, bucket, manifest, ensure_manifest=True): """ Put collection scope manifest to bulk update collection/scopes Args: ensure_manifest (bool): If set, blocks until the manifest has been applied to all nodes as the endpoint is asynchronous. 
""" if isinstance(bucket, Bucket): bucket = bucket.name params, headers = json.dumps(manifest), self._create_capi_headers() status, content, _ = self._http_request(f"{self.baseUrl}pools/default/buckets/{bucket}/scopes", 'PUT', params=params, headers=headers) if ensure_manifest: uid = json.loads(content)['uid'] ensure_manifest_status, manifest_content, _ = self._http_request( f"{self.baseUrl}pools/default/buckets/{bucket}/scopes/@ensureManifest/{uid}", 'POST', headers=headers) return status def get_bucket_manifest(self, bucket): if isinstance(bucket, Bucket): bucket = bucket.name api = '{0}{1}{2}{3}'.format(self.baseUrl, 'pools/default/buckets/', bucket, '/scopes') status, content, header = self._http_request(api) if status: return json.loads(content) else: raise Exception( "Cannot get manifest for bucket {}: status:{}, content:{}".format(bucket, status, content)) def _parse_manifest(self, bucket, extract=None): try: manifest = self.get_bucket_manifest(bucket) scopes = [] collections = [] for scope in manifest["scopes"]: scopes.append(scope["name"]) for collection in scope["collections"]: collections.append(collection["name"]) if extract == "scopes": return scopes elif extract == "collections": return collections except Exception as e: raise Exception("Cannot extract {} for bucket {} from manifest {}".format(extract, bucket, e.message)) def get_bucket_scopes(self, bucket): return self._parse_manifest(bucket, "scopes") def get_bucket_collections(self, bucket): return self._parse_manifest(bucket, "collections") def get_scope_collections(self, bucket, scope): try: manifest = self.get_bucket_manifest(bucket) scope_found = False collections_in_scope = [] for scopes in manifest["scopes"]: if scopes['name'] == scope: scope_found = True for collection in scopes['collections']: collections_in_scope.append(collection['name']) if not scope_found: log.error("Cannot get collections for scope {} because it does not exist".format(scope)) return collections_in_scope except 
Exception as e: raise Exception("Cannot get collections for bucket {}-> scope{} {}".format(bucket, scope, e.message)) def delete_scope(self, bucket, scope): api = self.baseUrl + 'pools/default/buckets/%s/scopes/%s' % (bucket, scope) headers = self._create_headers() status, content, header = self._http_request(api, 'DELETE', headers=headers) log.info("{0}".format(api)) return status def get_rest_endpoint_data(self, endpoint=None, ip=None, port=None): protocol = "http" if CbServer.use_https: port = CbServer.ssl_port_map.get(str(port), str(port)) protocol = "https" endpoint_base_url = "{0}://{1}:{2}/".format(protocol, ip, port) api = str(endpoint_base_url) + str(endpoint) print(f'Executing GET on: {api}') headers = self._create_headers() status, content, header = self._http_request(api, 'GET', headers=headers) return status, content def delete_collection(self, bucket, scope, collection): api = self.baseUrl + 'pools/default/buckets/%s/scopes/%s/collections/%s' % (bucket, scope, collection) headers = self._create_headers() status, content, header = self._http_request(api, 'DELETE', headers=headers) return status def get_collection(self, bucket): api = self.baseUrl + 'pools/default/buckets/%s/scopes' % bucket headers = self._create_headers() status, content, header = self._http_request(api, 'GET', headers=headers) return status, content def get_collection_uid(self, bucket, scope, collection): try: manifest = self.get_bucket_manifest(bucket) for scopes in manifest["scopes"]: if scopes['name'] == scope: for col in scopes['collections']: if col['name'] == collection: return col['uid'] log.error("Cannot get collection uid because {0}.{1}.{2} does not exist" .format(bucket, scope, collection)) except Exception as e: raise Exception("Exception thrown while getting collection uid {}" .format(e.message)) def run_view(self, bucket, view, name): api = self.capiBaseUrl + '/%s/_design/%s/_view/%s' % (bucket, view, name) status, content, header = self._http_request(api, 
headers=self._create_capi_headers()) json_parsed = json.loads(content) if not status: raise Exception("unable to create view") return json_parsed def delete_view(self, bucket, view): status, json = self._delete_design_doc(bucket, view) if not status: raise Exception("unable to delete the view") return json def spatial_results(self, bucket, spatial, params, limit=100): status, json = self._index_results(bucket, "spatial", spatial, params, limit) if not status: raise Exception("unable to obtain spatial view results") return json def create_spatial(self, bucket, spatial, function): status, json = self._create_design_doc(bucket, spatial, function) if status == False: raise Exception("unable to create spatial view") return json def get_spatial(self, bucket, spatial): status, json, meta = self._get_design_doc(bucket, spatial) if not status: raise Exception("unable to get the spatial view definition") return json, meta def delete_spatial(self, bucket, spatial): status, json = self._delete_design_doc(bucket, spatial) if not status: raise Exception("unable to delete the spatial view") return json # type_ is "view" or "spatial" def _index_results(self, bucket, type_, ddoc_name, params, limit, timeout=120, view_name=None): if view_name is None: view_name = ddoc_name query = '/{0}/_design/{1}/_{2}/{3}' api = self.capiBaseUrl + query.format(bucket, ddoc_name, type_, view_name) num_params = 0 if limit != None: num_params = 1 api += "?limit={0}".format(limit) for param in params: if num_params > 0: api += "&" else: api += "?" 
                # NOTE(review): this fragment belongs to an index-query method whose
                # `def` lies before this chunk; nesting reconstructed — confirm against
                # the full file.
                num_params += 1
                if param in ["key", "startkey", "endkey", "start_range", "end_range"] or isinstance(params[param], bool):
                    # JSON-encode values the index REST API expects as JSON literals.
                    api += "{0}={1}".format(param, json.dumps(params[param], separators=(',', ':')))
                else:
                    api += "{0}={1}".format(param, params[param])
        log.info("index query url: {0}".format(api))
        status, content, header = self._http_request(api, headers=self._create_capi_headers(),
                                                     timeout=timeout)
        json_parsed = json.loads(content)
        return status, json_parsed

    def get_couch_doc(self, doc_id, bucket="default", timeout=120):
        """ use couchBase uri to retrieve document from a bucket """
        api = self.capiBaseUrl + '/%s/%s' % (bucket, doc_id)
        status, content, header = self._http_request(api, headers=self._create_capi_headers(),
                                                     timeout=timeout)
        if not status:
            raise ReadDocumentException(doc_id, content)
        return json.loads(content)

    def _create_design_doc(self, bucket, name, function):
        """PUT a design document `name` with body `function` into `bucket`.

        Returns (status, parsed JSON response)."""
        api = self.capiBaseUrl + '/%s/_design/%s' % (bucket, name)
        status, content, header = self._http_request(
            api, 'PUT', function, headers=self._create_capi_headers())
        json_parsed = json.loads(content)
        return status, json_parsed

    def _get_design_doc(self, bucket, name):
        """GET design document `name` from `bucket`.

        Returns (status, doc JSON, metadata) where metadata comes from the
        X-Couchbase-Meta response header when present."""
        api = self.capiBaseUrl + '/%s/_design/%s' % (bucket, name)
        # `bucket` may be either a name string or a Bucket object.
        if isinstance(bucket, Bucket):
            api = self.capiBaseUrl + '/%s/_design/%s' % (bucket.name, name)
        status, content, header = self._http_request(api, headers=self._create_capi_headers())
        json_parsed = json.loads(content.decode())
        meta_parsed = ""
        if status:
            # in dp4 builds meta data is in content, not in header
            if 'X-Couchbase-Meta' in header:
                meta = header['X-Couchbase-Meta']
                meta_parsed = json.loads(meta)
            elif 'x-couchbase-meta' in header:
                meta = header['x-couchbase-meta']
                meta_parsed = json.loads(meta)
            else:
                # Fall back to meta fields embedded in the document body, if any.
                meta_parsed = {}
                try:
                    meta_parsed["_rev"] = json_parsed["_rev"]
                    meta_parsed["_id"] = json_parsed["_id"]
                except KeyError:
                    pass
        return status, json_parsed, meta_parsed

    def _delete_design_doc(self, bucket, name):
        """DELETE design document `name` from `bucket`; raises if it does not exist."""
        status, design_doc, meta = self._get_design_doc(bucket, name)
        if not status:
            raise Exception("unable to find for deletion design document")
        api = self.capiBaseUrl + '/%s/_design/%s' % (bucket, name)
        if isinstance(bucket, Bucket):
            api = self.capiBaseUrl + '/%s/_design/%s' % (bucket.name, name)
        status, content, header = self._http_request(api, 'DELETE',
                                                     headers=self._create_capi_headers())
        json_parsed = json.loads(content)
        return status, json_parsed

    def spatial_compaction(self, bucket, design_name):
        """Trigger spatial-view compaction for `design_name` in `bucket`."""
        api = self.capiBaseUrl + '/%s/_design/%s/_spatial/_compact' % (bucket, design_name)
        if isinstance(bucket, Bucket):
            api = self.capiBaseUrl + \
                '/%s/_design/%s/_spatial/_compact' % (bucket.name, design_name)
        status, content, header = self._http_request(api, 'POST',
                                                     headers=self._create_capi_headers())
        json_parsed = json.loads(content)
        return status, json_parsed

    # Make a _design/_info request
    def set_view_info(self, bucket, design_name):
        """Get view diagnostic info (node specific)"""
        api = self.capiBaseUrl
        if isinstance(bucket, Bucket):
            api += '/_set_view/{0}/_design/{1}/_info'.format(bucket.name, design_name)
        else:
            # NOTE(review): no leading '/' in this branch, unlike the Bucket branch —
            # relies on capiBaseUrl ending with '/'; confirm intentional.
            api += '_set_view/{0}/_design/{1}/_info'.format(bucket, design_name)
        status, content, header = self._http_request(api, 'GET',
                                                     headers=self._create_capi_headers())
        if not status:
            raise SetViewInfoNotFound(design_name, content)
        json_parsed = json.loads(content)
        return status, json_parsed

    # Make a _spatial/_info request
    def spatial_info(self, bucket, design_name):
        """Fetch spatial-view diagnostic info for `design_name` in `bucket`."""
        api = self.capiBaseUrl + \
            '/%s/_design/%s/_spatial/_info' % (bucket, design_name)
        status, content, header = self._http_request(
            api, 'GET', headers=self._create_capi_headers())
        json_parsed = json.loads(content)
        return status, json_parsed

    def _create_capi_headers(self):
        """Headers for CAPI (views) requests using this connection's credentials."""
        authorization = self.get_authorization(self.username, self.password)
        return {'Content-Type': 'application/json',
                'Authorization': 'Basic %s' % authorization,
                'Accept': '*/*'}

    def _create_capi_headers_with_auth(self, username, password):
        """Headers for CAPI requests using explicitly supplied credentials."""
        authorization = self.get_authorization(username, password)
        return {'Content-Type': 'application/json',
                'Authorization': 'Basic %s' % authorization,
                'Accept': '*/*'}

    def _create_headers_with_auth(self, username, password):
        """Authorization-only header set for the given credentials."""
        authorization = self.get_authorization(username, password)
        return {'Authorization': 'Basic %s' % authorization}

    # authorization must be a base64 string of username:password
    def _create_headers(self):
        """Default form-encoded headers using this connection's credentials."""
        authorization = self.get_authorization(self.username, self.password)
        return {'Content-Type': 'application/x-www-form-urlencoded',
                'Authorization': 'Basic %s' % authorization,
                'Accept': '*/*'}

    # authorization must be a base64 string of username:password
    def _create_headers_encoded_prepared(self):
        """JSON headers (no Accept) using this connection's credentials."""
        authorization = self.get_authorization(self.username, self.password)
        return {'Content-Type': 'application/json',
                'Authorization': 'Basic %s' % authorization}

    def _get_auth(self, headers):
        """Decode the Basic-auth credentials out of `headers` for log messages.

        Returns "" when there is no decodable Authorization header."""
        key = 'Authorization'
        if key in headers:
            val = headers[key]
            if val.startswith("Basic "):
                try:
                    val = val.encode()
                    # Strip the "Basic " prefix (6 bytes) and base64-decode the rest.
                    return str("auth: " + base64.decodebytes(val[6:]).decode())
                except Exception as e:
                    print(e)
        return ""

    def _http_request(self, api, method='GET', params='', headers=None, timeout=120,
                      disable_ssl_certificate_validation=True):
        """Issue an HTTP request with retry/backoff until `timeout` expires.

        Returns (success_bool, response body, response object). Retries on
        socket errors and httplib2.ServerNotFoundError with exponential
        backoff (3s, 6s, 12s, ...); raises ServerUnavailableException once
        `timeout` seconds have elapsed."""
        if not headers:
            headers = self._create_headers()
        end_time = time.time() + timeout
        log.debug("Executing {0} request for following api {1} with Params: {2} and Headers: {3}"\
                  .format(method, api, params, headers))
        count = 1
        t1 = 3  # initial backoff in seconds; doubled after each failed attempt
        while True:
            try:
                try:
                    # Optional per-call tracing controlled by the test input config.
                    if TestInputSingleton.input.param("debug.api.calls", False):
                        log.info("--->Start calling httplib2.Http({}).request({},{},{},{})".format(timeout,api,headers,method,params))
                except AttributeError:
                    pass
                response, content = httplib2.Http(timeout=timeout, disable_ssl_certificate_validation=disable_ssl_certificate_validation).\
                    request(api, method, params, headers)
                try:
                    if TestInputSingleton.input.param("debug.api.calls", False):
                        log.info(
                            "--->End calling httplib2.Http({}).request({},{},{},{})".format(timeout, api, headers, method, params))
                except AttributeError:
                    pass
                if response['status'] in ['200', '201', '202']:
                    return True, content, response
                else:
                    # Non-2xx: build a diagnostic message from the JSON error body
                    # when the server returned one.
                    try:
                        json_parsed = json.loads(content)
                    except ValueError as e:
                        json_parsed = {}
                        json_parsed["error"] = "status: {0}, content: {1}"\
                            .format(response['status'], content)
                    reason = "unknown"
                    if "error" in json_parsed:
                        reason = json_parsed["error"]
                    message = '{0} {1} body: {2} headers: {3} error: {4} reason: {5} {6} {7}'.\
                        format(method, api, params, headers, response['status'],
                               reason, str(str(content).rstrip('\n')),
                               self._get_auth(headers))
                    log.error(message)
                    log.debug(''.join(traceback.format_stack()))
                    return False, content, response
            except socket.error as e:
                # Only log the first few attempts to avoid flooding the log.
                if count < 4:
                    log.error("socket error while connecting to {0} error {1} ".format(api, e))
                if time.time() > end_time:
                    log.error("Giving up due to {2}! Tried {0} connect {1} times.".format(
                        api, count, e))
                    raise ServerUnavailableException(ip=self.ip)
            except (AttributeError, httplib2.ServerNotFoundError) as e:
                if count < 4:
                    log.error("ServerNotFoundError error while connecting to {0} error {1} "\
                              .format(api, e))
                if time.time() > end_time:
                    log.error("Giving up due to {2}! Tried {0} connect {1} times.".\
                              format(api, count, e))
                    raise ServerUnavailableException(ip=self.ip)
            time.sleep(t1)
            count += 1
            t1 *= 2

    def init_cluster(self, username='Administrator', password='password', port='8091'):
        """POST settings/web to set the cluster admin credentials and port."""
        log.info("--> in init_cluster...{},{},{}".format(username,password,port))
        api = self.baseUrl + 'settings/web'
        params = urllib.parse.urlencode({'port': port,
                                         'username': username,
                                         'password': password})
        log.info('settings/web params on {0}:{1}:{2}'.format(self.ip, self.port, params))
        status, content, header = self._http_request(api, 'POST', params=params)
        log.info("--> status:{}".format(status))
        return status

    def init_node(self, set_node_services=None):
        """ need a standalone method to initialize a node that could call
            anywhere with quota from testconstant """
        # Resolve the service list: explicit argument > services_node_init >
        # self.services; "-" or "," act as separators in the explicit form.
        self.node_services = []
        if set_node_services is None:
            set_node_services = self.services_node_init
        if set_node_services is None and self.services == "":
            self.node_services = ["kv"]
        elif set_node_services is None and self.services != "":
            self.node_services = self.services.split(",")
        elif set_node_services is not None:
            if "-" in set_node_services:
                self.node_services = set_node_services.split("-")
            if "," in set_node_services:
                self.node_services = set_node_services.split(",")
        # Poll until the node reports a non-zero memory quota.
        kv_quota = 0
        while kv_quota == 0:
            time.sleep(1)
            kv_quota = int(self.get_nodes_self().mcdMemoryReserved)
        info = self.get_nodes_self()
        kv_quota = int(info.mcdMemoryReserved * CLUSTER_QUOTA_RATIO)
        cb_version = info.version[:5]
        if cb_version in COUCHBASE_FROM_VERSION_4:
            # Carve per-service quotas out of the KV quota for 4.x+ services.
            if "index" in self.node_services:
                log.info("quota for index service will be %s MB" % (INDEX_QUOTA))
                kv_quota -= INDEX_QUOTA
                log.info("set index quota to node %s " % self.ip)
                self.set_service_memoryQuota(service='indexMemoryQuota',
                                             memoryQuota=INDEX_QUOTA)
            if "fts" in self.node_services:
                log.info("quota for fts service will be %s MB" % (FTS_QUOTA))
                kv_quota -= FTS_QUOTA
                log.info("set both index and fts quota at node %s "% self.ip)
                self.set_service_memoryQuota(service='ftsMemoryQuota',
                                             memoryQuota=FTS_QUOTA)
            if "cbas" in self.node_services:
                log.info("quota for cbas service will be %s MB" % (CBAS_QUOTA))
                kv_quota -= CBAS_QUOTA
                self.set_service_memoryQuota(service = "cbasMemoryQuota",
                                             memoryQuota=CBAS_QUOTA)
        kv_quota -= 1
        if kv_quota < MIN_KV_QUOTA:
            raise Exception("KV RAM needs to be more than %s MB"
                            " at node %s" % (MIN_KV_QUOTA, self.ip))
        log.info("quota for kv: %s MB" % kv_quota)
        self.init_cluster_memoryQuota(self.username, self.password, kv_quota)
        if cb_version in COUCHBASE_FROM_VERSION_4:
            self.init_node_services(username=self.username,
                                    password=self.password,
                                    services=self.node_services)
        self.init_cluster(username=self.username, password=self.password)
        return kv_quota

    def init_node_services(self, username='Administrator', password='password',
                           hostname='127.0.0.1', port='8091', services=None):
        """POST node/controller/setupServices to assign `services` to this node.

        Treats "cannot change node services after cluster is provisioned" as
        success, since the node is already set up."""
        if CbServer.use_https:
            port = CbServer.ssl_port_map.get(str(port), str(port))
        log.info("--> init_node_services({},{},{},{},{})".format(username,password,hostname,port,services))
        api = self.baseUrl + '/node/controller/setupServices'
        if services == None:
            log.info(" services are marked as None, will not work")
            return False
        # NOTE(review): params_dict is built but never sent; the urlencoded
        # `params` below is what gets POSTed.
        params_dict = {'user': username,
                       'password': password,
                       'services': ",".join(services)}
        if hostname == "127.0.0.1":
            hostname = "{0}:{1}".format(hostname, port)
        params = urllib.parse.urlencode({'hostname': hostname,
                                         'user': username,
                                         'password': password,
                                         'services': ",".join(services)})
        log.info('/node/controller/setupServices params on {0}: {1}:{2}'.format(self.ip, self.port, params))
        status, content, header = self._http_request(api, 'POST', params)
        error_message = "cannot change node services after cluster is provisioned"
        if not status and error_message in str(content):
            status = True
            log.info("This node is already provisioned with services, we do not consider this as failure for test case")
        return status

    def get_cluster_settings(self):
        """GET settings/web and return the parsed settings dict (empty on failure)."""
        settings = {}
        api = self.baseUrl + 'settings/web'
        status, content, header = self._http_request(api, 'GET')
        if status:
            settings = json.loads(content)
        log.info('settings/web params on {0}:{1}:{2}'.format(self.ip, self.port, settings))
        return settings

    def init_cluster_memoryQuota(self, username='Administrator',
                                 password='password',
                                 memoryQuota=256):
        """POST pools/default to set the cluster KV memory quota (MB)."""
        api = self.baseUrl + 'pools/default'
        params = urllib.parse.urlencode({'memoryQuota': memoryQuota})
        log.info('pools/default params : {0}'.format(params))
        status, content, header = self._http_request(api, 'POST', params)
        return status

    def set_service_memoryQuota(self, service, username='Administrator',
                                password='password',
                                memoryQuota=256):
        ''' cbasMemoryQuota for cbas service.
            ftsMemoryQuota for fts service.
            indexMemoryQuota for index service.'''
        api = self.baseUrl + 'pools/default'
        params = urllib.parse.urlencode({service: memoryQuota})
        log.info('pools/default params : {0}'.format(params))
        status, content, header = self._http_request(api, 'POST', params)
        return status

    def set_cluster_name(self, name):
        """POST pools/default to set the cluster display name (None -> "")."""
        api = self.baseUrl + 'pools/default'
        if name is None:
            name = ""
        params = urllib.parse.urlencode({'clusterName': name})
        log.info('pools/default params : {0}'.format(params))
        status, content, header = self._http_request(api, 'POST', params)
        return status

    def set_indexer_storage_mode(self, username='Administrator',
                                 password='password',
                                 storageMode='plasma'):
        """ StorageMode could be plasma or memopt
            From spock, we replace forestdb with plasma """
        api = self.baseUrl + 'settings/indexes'
        params = urllib.parse.urlencode({'storageMode': storageMode})
        error_message = "storageMode must be one of plasma, memory_optimized"
        log.info('settings/indexes params : {0}'.format(params))
        status, content, header = self._http_request(api, 'POST', params)
        if not status and error_message in content.decode():
            #TODO: Currently it just acknowledges if there is an error.
            #And proceeds with further initialization.
            log.info(content)
        return status

    def set_indexer_num_replica(self, num_replica=0):
        """Set indexer.settings.num_replica via the index service settings API.

        NOTE(review): the settings are POSTed twice (once with params= kwarg,
        once positionally) and error_message is "" so `error_message in content`
        is always true — looks like leftover copy/paste; confirm intent."""
        api = self.index_baseUrl + 'settings'
        params = {'indexer.settings.num_replica': num_replica}
        params = json.dumps(params)
        status, content, header = self._http_request(api, 'POST',
                                                     params=params,
                                                     timeout=60)
        error_message = ""
        log.info('settings params : {0}'.format(params))
        status, content, header = self._http_request(api, 'POST', params)
        if not status and error_message in content:
            # TODO: Currently it just acknowledges if there is an error.
            # And proceeds with further initialization.
            log.info(content)
        return status

    def cleanup_indexer_rebalance(self, server):
        """GET cleanupRebalance on `server`'s index port (or this node's baseUrl)."""
        protocol = "http"
        if CbServer.use_https:
            protocol = "https"
        if server:
            api = "{0}://{1}:{2}/".format(protocol, server.ip, self.index_port) + 'cleanupRebalance'
        else:
            api = self.baseUrl + 'cleanupRebalance'
        status, content, _ = self._http_request(api, 'GET')
        if status:
            return content
        else:
            log.error("cleanupRebalance:{0},content:{1}".format(status, content))
            raise Exception("indexer rebalance cleanup failed")

    def list_indexer_rebalance_tokens(self, server):
        """GET listRebalanceTokens from the index service and return the body text."""
        protocol = "http"
        if CbServer.use_https:
            protocol = "https"
        if server:
            api = "{0}://{1}:{2}/".format(protocol, server.ip, self.index_port) + 'listRebalanceTokens'
        else:
            api = self.baseUrl + 'listRebalanceTokens'
        print(api)
        status, content, _ = self._http_request(api, 'GET')
        if status:
            return content.decode('utf-8')
        else:
            log.error("listRebalanceTokens:{0},content:{1}".format(status, content))
            raise Exception("list rebalance tokens failed")

    def wait_until_cbas_is_ready(self, timeout):
        """ Wait until a http request can be made to the analytics service """
        timeout = time.time() + timeout
        while time.time() < timeout:
            try:
                self.execute_statement_on_cbas("SELECT 'hello' as message",
                                               None)
                return True
            except ServerUnavailableException:
                self.sleep(1, "Waiting for analytics server to be ready")
        return False

    def execute_statement_on_cbas(self, statement, mode, pretty=True,
                                  timeout=70, client_context_id=None,
                                  username=None, password=None):
        """POST `statement` to the analytics service and return the raw response.

        Raises on 503 (rejected) and on capacity errors reported with 500/400."""
        if not username:
            username = self.username
        if not password:
            password = self.password
        api = self.cbas_base_url + "/analytics/service"
        headers = self._create_capi_headers_with_auth(username, password)
        params = {'statement': statement, 'pretty': pretty, 'client_context_id': client_context_id}
        if mode is not None:
            params['mode'] = mode
        params = json.dumps(params)
        status, content, header = self._http_request(api, 'POST',
                                                     headers=headers,
                                                     params=params,
                                                     timeout=timeout)
        if status:
            return content
        elif str(header['status']) == '503':
            log.info("Request Rejected")
            raise Exception("Request Rejected")
        elif str(header['status']) in ['500', '400']:
            json_content = json.loads(content)
            msg = json_content['errors'][0]['msg']
            if "Job requirement" in msg and "exceeds capacity" in msg:
                raise Exception("Capacity cannot meet job requirement")
            else:
                return content
        else:
            log.error("/analytics/service status:{0},content:{1}".format(
                status, content))
            raise Exception("Analytics Service API failed")

    def delete_active_request_on_cbas(self, client_context_id, username=None, password=None):
        """DELETE an active analytics request by client_context_id.

        Returns the HTTP status string; 404 (already gone) is treated as success."""
        if not username:
            username = self.username
        if not password:
            password = self.password
        api = self.cbas_base_url + "/analytics/admin/active_requests?client_context_id={0}".format(
            client_context_id)
        headers = self._create_capi_headers_with_auth(username, password)
        status, content, header = self._http_request(api, 'DELETE',
                                                     headers=headers,
                                                     timeout=60)
        if status:
            return header['status']
        elif str(header['status']) == '404':
            log.info("Request Not Found")
            return header['status']
        else:
            log.error(
                "/analytics/admin/active_requests status:{0},content:{1}".format(
                    status, content))
            raise Exception("Analytics Admin API failed")

    def get_cluster_ceritificate(self):
        """GET pools/default/certificate and return it as text.

        NOTE(review): method name contains a typo ("ceritificate") — kept for
        compatibility with existing callers."""
        api = self.baseUrl + 'pools/default/certificate'
        status, content, _ = self._http_request(api, 'GET')
        if status:
            return content.decode("utf-8")
        else:
            log.error("/pools/default/certificate status:{0},content:{1}".format(status, content))
            raise Exception("certificate API failed")

    def regenerate_cluster_certificate(self):
        """POST controller/regenerateCertificate and return the new certificate."""
        api = self.baseUrl + 'controller/regenerateCertificate'
        status, content, _ = self._http_request(api, 'POST')
        if status:
            return content
        else:
            log.error("controller/regenerateCertificate status:{0},content:{1}".format(status, content))
            raise Exception("regenerateCertificate API failed")

    def __remote_clusters(self, api, op, remoteIp, remotePort, username, password, name,
                          demandEncryption=0, certificate='', encryptionType="half"):
        """Shared worker for add/modify of an XDCR remote-cluster reference.

        POSTs the reference to `api`, retrying up to 5 times; "Duplicate
        cluster" responses are returned as success."""
        param_map = {'hostname': "{0}:{1}".format(remoteIp, remotePort),
                     'username': username,
                     'password': password,
                     'name':name}
        from TestInput import TestInputServer
        remote = TestInputServer()
        remote.ip = remoteIp
        remote.rest_username = username
        remote.rest_password = password
        remote.port = remotePort
        if demandEncryption:
            param_map ['demandEncryption'] = 'on'
            if certificate != '':
                param_map['certificate'] = certificate
        # NOTE(review): nesting of the version-dependent encryption parameter
        # reconstructed from flattened text — confirm against the full file.
        if self.check_node_versions("5.5") and RestConnection(remote).check_node_versions("5.5"):
            # 5.5.0 and above
            param_map['secureType'] = encryptionType
        elif self.check_node_versions("5.0") and RestConnection(remote).check_node_versions("5.0"):
            param_map['encryptionType'] = encryptionType
        params = urllib.parse.urlencode(param_map)
        retries = 5
        while retries:
            status, content, _ = self._http_request(api, 'POST', params)
            # sample response :
            # [{"name":"two","uri":"/pools/default/remoteClusters/two","validateURI":"/pools/default/remoteClusters/two?just_validate=1","hostname":"127.0.0.1:9002","username":"Administrator"}]
            remoteCluster = json.loads(content)
            if status or "Duplicate cluster" in remoteCluster["_"]:
                return remoteCluster
            retries -= 1
        raise Exception("remoteCluster API '{0} remote cluster' failed".format(op))

    def add_remote_cluster(self, remoteIp, remotePort, username, password, name,
                           demandEncryption=0, certificate='', encryptionType="full"):
        # example : password:password username:Administrator hostname:127.0.0.1:9002 name:two
        msg = "adding remote cluster hostname:{0}:{1} with username:password {2}:{3} name:{4} to source node: {5}:{6}"
        log.info(msg.format(remoteIp, remotePort, username, password, name, self.ip, self.port))
        api = self.baseUrl + 'pools/default/remoteClusters'
        return self.__remote_clusters(api, 'add', remoteIp, remotePort,
                                      username, password, name,
                                      demandEncryption, certificate, encryptionType)

    def add_remote_cluster_new(self, remoteIp, remotePort, username, password, name,
                               demandEncryption=0, certificate=''):
        # example : password:password username:Administrator hostname:127.0.0.1:9002 name:two
        msg = "adding remote cluster hostname:{0}:{1} with username:password {2}:{3} name:{4} to source node: {5}:{6}"
        log.info(msg.format(remoteIp, remotePort, username, password, name, self.ip, self.port))
        api = self.baseUrl + 'pools/default/remoteClusters'
        return self.__remote_clusters(api, 'add', remoteIp, remotePort,
                                      username, password, name,
                                      demandEncryption, certificate)

    def modify_remote_cluster(self, remoteIp, remotePort, username, password, name,
                              demandEncryption=0, certificate='', encryptionType="half"):
        """Update an existing remote-cluster reference identified by `name`."""
        log.info("modifying remote cluster name:{0}".format(name))
        api = self.baseUrl + 'pools/default/remoteClusters/' + urllib.parse.quote(name)
        return self.__remote_clusters(api, 'modify', remoteIp, remotePort,
                                      username, password, name,
                                      demandEncryption, certificate, encryptionType)

    def get_remote_clusters(self):
        """GET the list of remote-cluster references ([] on failure)."""
        remote_clusters = []
        api = self.baseUrl + 'pools/default/remoteClusters/'
        params = urllib.parse.urlencode({})
        status, content, header = self._http_request(api, 'GET', params)
        if status:
            remote_clusters = json.loads(content)
        return remote_clusters

    def remove_all_remote_clusters(self):
        """Delete every non-deleted remote-cluster reference on this node."""
        remote_clusters = self.get_remote_clusters()
        for remote_cluster in remote_clusters:
            try:
                if remote_cluster["deleted"] == False:
                    self.remove_remote_cluster(remote_cluster["name"])
            except KeyError:
                # goxdcr cluster references will not contain "deleted" field
                self.remove_remote_cluster(remote_cluster["name"])

    def remove_remote_cluster(self, name):
        # example : name:two
        msg = "removing remote cluster name:{0}".format(urllib.parse.quote(name))
        log.info(msg)
        api = self.baseUrl + 'pools/default/remoteClusters/{0}?'.format(urllib.parse.quote(name))
        params = urllib.parse.urlencode({})
        status, content, header = self._http_request(api, 'DELETE', params)
        #sample response : "ok"
        if not status:
            log.error("failed to remove remote cluster: status:{0},content:{1}".format(status, content))
            raise Exception("remoteCluster API 'remove cluster' failed")

    # replicationType:continuous toBucket:default toCluster:two fromBucket:default
    # defaults at https://github.com/couchbase/goxdcr/metadata/replication_settings.go#L20-L33
    def start_replication(self, replicationType, fromBucket, toCluster, rep_type="xmem",
                          toBucket=None, xdcr_params={}):
        """Create an XDCR replication and return its id; retries on bad JSON.

        NOTE(review): `xdcr_params={}` is a mutable default argument and the
        bare `except:` hides the real failure — candidates for a later fix."""
        toBucket = toBucket or fromBucket
        msg = "starting {0} replication type:{1} from {2} to {3} in the remote" \
              " cluster {4} with settings {5}"
        log.info(msg.format(replicationType, rep_type, fromBucket, toBucket,
                            toCluster, xdcr_params))
        api = self.baseUrl + 'controller/createReplication'
        param_map = {'replicationType': replicationType,
                     'toBucket': toBucket,
                     'fromBucket': fromBucket,
                     'toCluster': toCluster,
                     'type': rep_type}
        param_map.update(xdcr_params)
        params = urllib.parse.urlencode(param_map)
        retries = 3
        while retries:
            try:
                status, content, header = self._http_request(api, 'POST', params)
                # response : {"id": "replication_id"}
                json_parsed = json.loads(content)
                log.info("Replication created with id: {0}".format(json_parsed['id']))
                return json_parsed['id']
            except ValueError:
                time.sleep(10)
                retries -= 1
            except:
                raise Exception("create replication failed: status:{0},content:{1}".format(status, content))

    def get_replications(self):
        """Return the XDCR tasks from ns_server's task list."""
        replications = []
        content = self.ns_server_tasks()
        for item in content:
            if not isinstance(item, dict):
                log.error("Unexpected error while retrieving pools/default/tasks : {0}".format(content))
                raise Exception("Unexpected error while retrieving pools/default/tasks : {0}".format(content))
            if item["type"] == "xdcr":
                replications.append(item)
        return replications

    def remove_all_replications(self):
        """Cancel every XDCR replication currently known to ns_server."""
        replications = self.get_replications()
        for replication in replications:
            self.stop_replication(replication["cancelURI"])

    def stop_replication(self, uri):
        """DELETE the replication at `uri` (relative to baseUrl), retrying 3 times."""
        log.info("Deleting replication {0}".format(uri))
        # baseUrl ends with '/' and uri starts with one; drop the duplicate.
        api = self.baseUrl[:-1] + uri
        retries = 3
        while retries:
            status, content, header = self._http_request(api, 'DELETE')
            if status:
                log.info("Replication deleted successfully")
                return
            else:
                retries -= 1
                time.sleep(10)
        raise Exception("delete replication failed: status:{0}, content:{1}".format(status, content))

    def remove_all_recoveries(self):
        """Stop every in-progress recovery task reported by ns_server."""
        recoveries = []
        content = self.ns_server_tasks()
        for item in content:
            if item["type"] == "recovery":
                recoveries.append(item)
        for recovery in recoveries:
            api = self.baseUrl + recovery["stopURI"]
            status, content, header = self._http_request(api, 'POST')
            if not status:
                raise CBRecoveryFailedException("impossible to stop cbrecovery by {0}".format(api))
            log.info("recovery stopped by {0}".format(api))

    # params serverIp : the server to add to this cluster
    # raises exceptions when
    #  unauthorized user
    #  server unreachable
    #  can't add the node to itself ( TODO )
    #  server already added
    # returns otpNode
    def add_node(self, user='', password='', remoteIp='', port='8091', zone_name='', services=None):
        otpNode = None
        protocol = "http"
        if CbServer.use_https or CbServer.n2n_encryption:
            port = CbServer.ssl_port
            protocol = "https"
        # if ip format is ipv6 and enclosing brackets are not found,
        # enclose self.ip and remoteIp
        if self.ip.count(':') and self.ip[0] != '[':
            self.ip = '[' + self.ip + ']'
        if remoteIp.count(':') and remoteIp[0] != '[':
            remoteIp = '[' + remoteIp + ']'
        log.info('adding remote node @{0}:{1} to this cluster @{2}:{3}'\
                 .format(remoteIp, port, self.ip, self.port))
        if zone_name == '':
            api = self.baseUrl + 'controller/addNode'
        else:
            # Adding into a named server group requires the group's addNode URI.
            api = self.baseUrl + 'pools/default/serverGroups'
            if self.is_zone_exist(zone_name):
                zones = self.get_zone_names()
                api = "/".join((api, zones[zone_name], "addNode"))
                log.info("node {0} will be added to zone {1}".format(remoteIp, zone_name))
            else:
                raise Exception("There is not zone with name: %s in cluster" % zone_name)
        params = urllib.parse.urlencode({'hostname': "{0}://{1}:{2}".format(protocol, remoteIp, port),
                                         'user': user,
                                         'password': password})
        if services != None:
            services = ','.join(services)
            params = urllib.parse.urlencode({'hostname': "{0}://{1}:{2}".format(protocol, remoteIp, port),
                                             'user': user,
                                             'password': password,
                                             'services': services})
        # Only attempt the add once any in-flight rebalance has finished.
        if self.monitorRebalance():
            status, content, header = self._http_request(api, 'POST', params)
            if status:
                json_parsed = json.loads(content)
                otpNodeId = json_parsed['otpNode']
                otpNode = OtpNode(otpNodeId)
                if otpNode.ip == '127.0.0.1':
                    otpNode.ip = self.ip
            else:
                self.print_UI_logs()
                try:
                    # print logs from node that we want to add
                    wanted_node = deepcopy(self)
                    wanted_node.ip = remoteIp
                    wanted_node.print_UI_logs()
                except Exception as ex:
                    self.log(ex)
                if content.find(b'Prepare join failed. Node is already part of cluster') >= 0:
                    raise ServerAlreadyJoinedException(nodeIp=self.ip,
                                                       remoteIp=remoteIp)
                elif content.find(b'Prepare join failed. Joining node to itself is not allowed') >= 0:
                    raise ServerSelfJoinException(nodeIp=self.ip,
                                                  remoteIp=remoteIp)
                else:
                    log.error('add_node error : {0}'.format(content))
                    raise AddNodeException(nodeIp=self.ip,
                                           remoteIp=remoteIp,
                                           reason=content)
        else:
            raise AddNodeException(nodeIp=self.ip,
                                   remoteIp=remoteIp,
                                   reason="Rebalance error, cannot add node")
        return otpNode

    # params serverIp : the server to add to this cluster
    # raises exceptions when
    #  unauthorized user
    #  server unreachable
    #  can't add the node to itself ( TODO )
    #  server already added
    # returns otpNode
    def do_join_cluster(self, user='', password='', remoteIp='', port='8091', zone_name='', services=None):
        otpNode = None
        if CbServer.use_https:
            port = CbServer.ssl_port
        log.info('adding remote node @{0}:{1} to this cluster @{2}:{3}'\
                 .format(remoteIp, port, self.ip, self.port))
        api = self.baseUrl + '/node/controller/doJoinCluster'
        params = urllib.parse.urlencode({'hostname': "{0}:{1}".format(remoteIp, port),
                                         'user': user,
                                         'password': password})
        if services != None:
            services = ','.join(services)
            params = urllib.parse.urlencode({'hostname': "{0}:{1}".format(remoteIp, port),
                                             'user': user,
                                             'password': password,
                                             'services': services})
        status, content, header = self._http_request(api, 'POST', params)
        if status:
            json_parsed = json.loads(content)
            otpNodeId = json_parsed['otpNode']
            otpNode = OtpNode(otpNodeId)
            if otpNode.ip == '127.0.0.1':
                otpNode.ip = self.ip
        else:
            self.print_UI_logs()
            try:
                # print logs from node that we want to add
                wanted_node = deepcopy(self)
                wanted_node.ip = remoteIp
                wanted_node.print_UI_logs()
            except Exception as ex:
                self.log(ex)
            # NOTE(review): unlike add_node, these .find() calls use str
            # patterns against what may be a bytes body — confirm content type.
            if content.find('Prepare join failed. Node is already part of cluster') >= 0:
                raise ServerAlreadyJoinedException(nodeIp=self.ip,
                                                   remoteIp=remoteIp)
            elif content.find('Prepare join failed. Joining node to itself is not allowed') >= 0:
                raise ServerSelfJoinException(nodeIp=self.ip,
                                              remoteIp=remoteIp)
            else:
                log.error('add_node error : {0}'.format(content))
                raise AddNodeException(nodeIp=self.ip,
                                       remoteIp=remoteIp,
                                       reason=content)
        return otpNode

    def eject_node(self, user='', password='', otpNode=None):
        """POST controller/ejectNode to remove `otpNode` from the cluster."""
        if not otpNode:
            log.error('otpNode parameter required')
            return False
        api = self.baseUrl + 'controller/ejectNode'
        params = urllib.parse.urlencode({'otpNode': otpNode,
                                         'user': user,
                                         'password': password})
        status, content, header = self._http_request(api, 'POST', params)
        if status:
            log.info('ejectNode successful')
        else:
            if content.find('Prepare join failed. Node is already part of cluster') >= 0:
                raise ServerAlreadyJoinedException(nodeIp=self.ip,
                                                   remoteIp=otpNode)
            else:
                # TODO : raise an exception here
                log.error('eject_node error {0}'.format(content))
        return True

    def force_eject_node(self):
        """Force this node out of the cluster via diag/eval, then wait for restart."""
        self.diag_eval("gen_server:cast(ns_cluster, leave).")
        self.check_delay_restart_coucbase_server()

    """ when we do reset couchbase server by force reject, couchbase server will not
        down right away but delay few seconds to be down depend on server spec.
        This fx will detect that delay and return true when couchbase server down and
        up again after force reject """
    def check_delay_restart_coucbase_server(self):
        api = self.baseUrl + 'nodes/self'
        headers = self._create_headers()
        break_out = 0
        count_cbserver_up = 0
        # count_cbserver_up: 0 = still up pre-restart, 1 = seen down, 2 = back up.
        while break_out < 60 and count_cbserver_up < 2:
            try:
                response, content = httplib2.Http(timeout=120).request(api, 'GET', '', headers)
                if response['status'] in ['200', '201', '202'] and count_cbserver_up == 0:
                    log.info("couchbase server is up but down soon.")
                    time.sleep(1)
                    break_out += 1
                    # time needed for couchbase server reload after reset config
                    if break_out == 7:
                        log.info("couchbase server may be up already")
                        count_cbserver_up = 1
                elif response['status'] in ['200', '201', '202']:
                    count_cbserver_up = 2
                    log.info("couchbase server is up again in few seconds")
                    time.sleep(7)
            except (socket.error, AttributeError) as e:
                log.info("couchbase server is down. Waiting for couchbase server up")
                time.sleep(2)
                break_out += 1
                count_cbserver_up = 1
                pass
        if break_out >= 60:
            raise Exception("Couchbase server did not start after 60 seconds")

    def fail_over(self, otpNode=None, graceful=False):
        """Fail over `otpNode` (hard, or graceful when requested)."""
        if otpNode is None:
            log.error('otpNode parameter required')
            return False
        api = self.baseUrl + 'controller/failOver'
        if graceful:
            api = self.baseUrl + 'controller/startGracefulFailover'
        params = urllib.parse.urlencode({'otpNode': otpNode})
        status, content, header = self._http_request(api, 'POST', params)
        if status:
            log.info('fail_over node {0} successful'.format(otpNode))
        else:
            log.error('fail_over node {0} error : {1}'.format(otpNode, content))
            raise FailoverFailedException(content)
        return status

    def set_recovery_type(self, otpNode=None, recoveryType=None):
        """Set recovery type ('delta'/'full' per server API) for a failed-over node."""
        log.info("Going to set recoveryType={0} for node :: {1}".format(recoveryType, otpNode))
        if otpNode is None:
            log.error('otpNode parameter required')
            return False
        if recoveryType is None:
            log.error('recoveryType is not set')
            return False
        api = self.baseUrl + 'controller/setRecoveryType'
        params = urllib.parse.urlencode({'otpNode': otpNode,
                                         'recoveryType': recoveryType})
        status, content, header = self._http_request(api, 'POST', params)
        if status:
            log.info('recoveryType for node {0} set successful'.format(otpNode))
        else:
            log.error('recoveryType node {0} not set with error : {1}'.format(otpNode, content))
            raise SetRecoveryTypeFailed(content)
        return status

    def add_back_node(self, otpNode=None):
        """Re-add a failed-over node via controller/reAddNode."""
        if otpNode is None:
            log.error('otpNode parameter required')
            return False
        api = self.baseUrl + 'controller/reAddNode'
        params = urllib.parse.urlencode({'otpNode': otpNode})
        status, content, header = self._http_request(api, 'POST', params)
        if status:
            log.info('add_back_node {0} successful'.format(otpNode))
        else:
            log.error('add_back_node {0} error : {1}'.format(otpNode, content))
            raise InvalidArgumentException('controller/reAddNode',
                                           parameters=params)
        return status

    def rebalance(self, otpNodes=[], ejectedNodes=[], deltaRecoveryBuckets=None):
        """Start a rebalance with the given known/ejected node lists.

        NOTE(review): `otpNodes=[]`/`ejectedNodes=[]` are mutable default
        arguments — harmless here since they are only read, but fragile."""
        knownNodes = ','.join(otpNodes)
        ejectedNodesString = ','.join(ejectedNodes)
        if deltaRecoveryBuckets == None:
            params = {'knownNodes': knownNodes,
                      'ejectedNodes': ejectedNodesString,
                      'user': self.username,
                      'password': self.password}
        else:
            deltaRecoveryBuckets = ",".join(deltaRecoveryBuckets)
            params = {'knownNodes': knownNodes,
                      'ejectedNodes': ejectedNodesString,
                      'deltaRecoveryBuckets': deltaRecoveryBuckets,
                      'user': self.username,
                      'password': self.password}
        log.info('rebalance params : {0}'.format(params))
        params = urllib.parse.urlencode(params)
        api = self.baseUrl + "controller/rebalance"
        status, content, header = self._http_request(api, 'POST', params)
        if status:
            log.info('rebalance operation started')
        else:
            log.error('rebalance operation failed: {0}'.format(content))
            # extract the error
            raise InvalidArgumentException('controller/rebalance with error message {0}'.format(content),
                                           parameters=params)
        return status

    def diag_eval(self, code, print_log=True):
        """POST Erlang `code` to diag/eval/ and return (status, decoded body)."""
        api = '{0}{1}'.format(self.baseUrl, 'diag/eval/')
        status, content, header = self._http_request(api, "POST", code)
        if content:
            try:
                content = content.decode('utf-8')
            except (UnicodeDecodeError, AttributeError):
                pass
        if print_log:
            log.info("/diag/eval status on {0}:{1}: {2} content: {3} command: {4}".
                     format(self.ip, self.port, status, content, code))
        return status, content

    def set_chk_max_items(self, max_items):
        """Set the ns_config chk_max_items value via diag/eval."""
        status, content = self.diag_eval("ns_config:set(chk_max_items, " + str(max_items) + ")")
        return status, content

    def set_chk_period(self, period):
        """Set the ns_config chk_period value via diag/eval."""
        status, content = self.diag_eval("ns_config:set(chk_period, " + str(period) + ")")
        return status, content

    def set_enable_flow_control(self, flow=True, bucket='default'):
        """Toggle upr_enable_flow_control on `bucket` via diag/eval."""
        flow_control = "false"
        if flow:
            flow_control = "true"
        code = "ns_bucket:update_bucket_props(\"" + bucket + "\", [{extra_config_string, \"upr_enable_flow_control=" + flow_control + "\"}])"
        status, content = self.diag_eval(code)
        return status, content

    def change_flusher_total_batch_limit(self, flusher_total_batch_limit=3,
                                         bucket='default'):
        """Set flusher_total_batch_limit on `bucket` via diag/eval."""
        code = "ns_bucket:update_bucket_props(\"" + bucket \
               + "\", [{extra_config_string, " \
               + "\"flusher_total_batch_limit=" \
               + str(flusher_total_batch_limit) + "\"}])."
        status, content = self.diag_eval(code)
        return status, content

    def diag_master_events(self):
        """GET diag/masterEvents?o=1 and return (status, body)."""
        api = '{0}{1}'.format(self.baseUrl, 'diag/masterEvents?o=1')
        status, content, header = self._http_request(api, "GET")
        log.info("diag/masterEvents?o=1 status: {0} content: {1}".format(status, content))
        return status, content

    def get_admin_credentials(self):
        """Read the memcached admin user/password from ns_config via diag/eval."""
        code = 'ns_config:search_node_prop(node(), ns_config:latest(), memcached, admin_user)'
        status, id = self.diag_eval(code)
        code = 'ns_config:search_node_prop(node(), ns_config:latest(), memcached, admin_pass)'
        status, password = self.diag_eval(code)
        return id.strip('"'), password.strip('"')

    def monitorRebalance(self, stop_if_loop=True):
        """Poll rebalance progress until it completes, errors, or appears stuck.

        Returns True on completion, False on error/stuck progress."""
        start = time.time()
        progress = 0
        retry = 0
        same_progress_count = 0
        previous_progress = 0
        while progress != -1 and (progress != 100 or \
                    self._rebalance_progress_status() == 'running') and retry < 20:
            # -1 is error , -100 means could not retrieve progress
            progress = self._rebalance_progress()
            if progress == -100:
                log.error("unable to retrieve rebalanceProgress.try again in 1 second")
                retry += 1
            else:
                retry = 0
            if stop_if_loop:
                # reset same_progress_count if get a different result,
                # or progress is still O
                # (it may take a long time until the results are different from 0)
                if previous_progress != progress or progress == 0:
                    previous_progress = progress
                    same_progress_count = 0
                else:
                    same_progress_count += 1
                if same_progress_count > 50:
                    log.error("apparently rebalance progress code in infinite loop:"
                              " {0}".format(progress))
                    return False
            # sleep 10 seconds to printout less log
            time.sleep(10)
        if progress < 0:
            log.error("rebalance progress code : {0}".format(progress))
            return False
        else:
            duration = time.time() - start
            if duration > 10:
                sleep = 10
            else:
                sleep = duration
            log.info('rebalance progress took {:.02f} seconds '.format(duration))
            log.info("sleep for {0} seconds after rebalance...".format(sleep))
            time.sleep(sleep)
            return True

    def _rebalance_progress_status(self):
        """Return the 'status' field of pools/default/rebalanceProgress.

        Returns None when the request fails; implicitly None when the field
        is missing from a successful response."""
        api = self.baseUrl + "pools/default/rebalanceProgress"
        status, content, header = self._http_request(api)
        json_parsed = json.loads(content)
        if status:
            if "status" in json_parsed:
                return json_parsed['status']
        else:
            return None

    def _rebalance_status_and_progress(self):
        """
        Returns a 2-tuple capturing the rebalance status and progress, as follows:
            ('running', progress) - if rebalance is running
            ('none', 100) - if rebalance is not running (i.e. assumed done)
            (None, -100) - if there's an error getting the rebalance progress
                           from the server
            (None, -1) - if the server responds but there's no information on
                         what the status of rebalance is

        The progress is computed as a average of the progress of each node
        rounded to 2 decimal places.

        Throws RebalanceFailedException if rebalance progress returns an error message
        """
        avg_percentage = -1
        rebalance_status = None
        api = self.baseUrl + "pools/default/rebalanceProgress"
        try:
            status, content, header = self._http_request(api)
        except ServerUnavailableException as e:
            log.error(e)
            return None, -100
        json_parsed = json.loads(content)
        if status:
            if "status" in json_parsed:
                rebalance_status = json_parsed["status"]
                if "errorMessage" in json_parsed:
                    msg = '{0} - rebalance failed'.format(json_parsed)
                    log.error(msg)
                    self.print_UI_logs()
                    raise RebalanceFailedException(msg)
                elif rebalance_status == "running":
                    # Per-node entries are keyed like 'ns_1@<ip>'; average them.
                    total_percentage = 0
                    count = 0
                    for key in json_parsed:
                        if key.find('@') >= 0:
                            ns_1_dictionary = json_parsed[key]
                            percentage = ns_1_dictionary['progress'] * 100
                            count += 1
                            total_percentage += percentage
                    if count:
                        avg_percentage = (total_percentage // count)
                    else:
                        avg_percentage = 0
                    log.info('rebalance percentage : {0:.02f} %'.
                             format(round(avg_percentage, 2)))
                else:
                    avg_percentage = 100
        else:
            avg_percentage = -100
        return rebalance_status, avg_percentage

    def _rebalance_progress(self):
        """Progress-only convenience wrapper around _rebalance_status_and_progress."""
        return self._rebalance_status_and_progress()[1]

    def log_client_error(self, post):
        """POST a client-side error message to logClientError."""
        api = self.baseUrl + 'logClientError'
        status, content, header = self._http_request(api, 'POST', post)
        if not status:
            log.error('unable to logClientError')
        return status, content, header

    def trigger_index_compaction(self, timeout=120):
        """GET triggerCompaction on the index service; raises on failure."""
        node = None
        api = self.index_baseUrl + 'triggerCompaction'
        status, content, header = self._http_request(api, timeout=timeout)
        if not status:
            raise Exception(content)

    def set_index_settings(self, setting_json, timeout=120):
        """POST `setting_json` to the index service settings endpoint."""
        api = self.index_baseUrl + 'settings'
        status, content, header = self._http_request(api, 'POST',
                                                     json.dumps(setting_json))
        if not status:
            raise Exception(content)
        log.info("{0} set".format(setting_json))

    def set_index_settings_internal(self, setting_json, timeout=120):
        """POST `setting_json` to the internal index settings endpoint (5.5.0+)."""
        api = self.index_baseUrl + 'internal/settings'
        status, content, header = self._http_request(api, 'POST',
                                                     json.dumps(setting_json))
        if not status:
            if header['status']=='404':
                log.info("This endpoint is introduced only in 5.5.0, hence not found. 
Redirecting the request to the old endpoint") self.set_index_settings(setting_json, timeout) else: raise Exception(content) log.info("{0} set".format(setting_json)) def get_index_settings(self, timeout=120): node = None api = self.index_baseUrl + 'settings' status, content, header = self._http_request(api, timeout=timeout) if not status: raise Exception(content) return json.loads(content) def get_index_storage_mode(self, timeout=120): api = self.index_baseUrl + 'settings' status, content, header = self._http_request(api, timeout=timeout) if not status: raise Exception(content) return json.loads(content)["indexer.settings.storage_mode"] def set_index_planner_settings(self, setting, timeout=120): api = self.index_baseUrl + 'settings/planner?{0}'.format(setting) status, content, header = self._http_request(api, timeout=timeout) if not status: raise Exception(content) return json.loads(content) def get_index_stats(self, timeout=120, index_map=None): api = self.index_baseUrl + 'stats' status, content, header = self._http_request(api, timeout=timeout) if status: json_parsed = json.loads(content) index_map = RestParser().parse_index_stats_response(json_parsed, index_map=index_map) return index_map def get_index_stats_collections(self, timeout=120, index_map=None): api = self.index_baseUrl + 'stats' status, content, header = self._http_request(api, timeout=timeout) if status: json_parsed = json.loads(content) index_map = RestParser().parse_index_stats_response_collections(json_parsed, index_map=index_map) return index_map def get_all_index_stats(self, timeout=120, inst_id_filter=[], consumer_filter=None, text=False): """return: json object or text response of :9102/stats""" api = self.index_baseUrl + 'stats' all_index_stats = {} if inst_id_filter: inst_id_filter = json.dumps(inst_id_filter) elif consumer_filter: api += f"?consumerFilter={consumer_filter}" else: inst_id_filter = "" status, content, _ = self._http_request(api, timeout=timeout, params=inst_id_filter) if 
status: if text: all_index_stats = content.decode("utf8").replace('":', '": ').replace(",", ", ") else: all_index_stats = json.loads(content) return all_index_stats def get_index_official_stats(self, timeout=120, index_map=None, bucket="", scope="", collection=""): api = self.index_baseUrl + 'api/v1/stats' if bucket: api += f'/`{bucket.replace("%", "%25")}`' if scope: api += f'.{scope}' if collection: api += f'.{collection}' status, content, header = self._http_request(api, timeout=timeout) if status: json_parsed = json.loads(content) return json_parsed def get_indexes_count(self): indexes_count = {} index_map = self.get_index_storage_stats() for bucket, indexes in index_map.items(): for index, stats in indexes.items(): indexes_count[index] = stats["MainStore"]["count"] return indexes_count def get_index_storage_stats(self, timeout=120, index_map=None): api = self.index_baseUrl + 'stats/storage' status, content, header = self._http_request(api, timeout=timeout) if not status: raise Exception(content) json_parsed = json.loads(content) index_storage_stats = {} for index_stats in json_parsed: bucket = index_stats["Index"].split(":")[0] index_name = index_stats["Index"].split(":")[-1] if bucket not in list(index_storage_stats.keys()): index_storage_stats[bucket] = {} index_storage_stats[bucket][index_name] = index_stats["Stats"] return index_storage_stats def get_indexer_stats(self, timeout=120, index_map=None, baseUrl=None): if baseUrl is None: api = self.index_baseUrl + 'stats' else: api = baseUrl + 'stats' index_map = {} status, content, header = self._http_request(api, timeout=timeout) if status: json_parsed = json.loads(content) for key in list(json_parsed.keys()): tokens = key.split(":") val = json_parsed[key] if len(tokens) == 1: field = tokens[0] index_map[field] = val return index_map def get_indexer_metadata(self, timeout=120, index_map=None): api = self.index_baseUrl + 'getIndexStatus' index_map = {} status, content, header = self._http_request(api, 
                                                    timeout=timeout)
        # (tail of get_indexer_metadata: keep only un-namespaced top-level keys)
        if status:
            json_parsed = json.loads(content)
            for key in list(json_parsed.keys()):
                tokens = key.split(":")
                val = json_parsed[key]
                if len(tokens) == 1:
                    field = tokens[0]
                    index_map[field] = val
        return index_map

    def get_indexer_internal_stats(self, timeout=120, index_map=None):
        # Fetch indexer internal settings; keep only un-namespaced keys.
        api = self.index_baseUrl + 'settings?internal=ok'
        index_map = {}
        status, content, header = self._http_request(api, timeout=timeout)
        if status:
            json_parsed = json.loads(content)
            for key in list(json_parsed.keys()):
                tokens = key.split(":")
                val = json_parsed[key]
                if len(tokens) == 1:
                    field = tokens[0]
                    index_map[field] = val
        return index_map

    def trigger_compaction(self, timeout=120):
        # List plasma DB instances via plasmaDiag, then request compactAll
        # for each instance id found.
        node = None
        api = self.index_baseUrl + 'plasmaDiag'
        command = {'Cmd': 'listDBs'}
        status, content, header = self._http_request(api, 'POST', json.dumps(command), timeout=timeout)
        for l in list(iter(str(content, 'utf-8').splitlines())):
            try:
                # listing lines look like '<name> : <id>'
                x, id = l.split(" : ")
                if id:
                    log.info(f'Triggering compaction for instance id {id}')
                    compact_command = {'Cmd': 'compactAll', 'Args': [int(id)]}
                    status, content, header = self._http_request(api, 'POST', json.dumps(compact_command))
                    if not status:
                        log.error(f'Failed to trigger compaction : {content}')
            except ValueError:
                # line without the ' : ' separator -- not a DB entry, skip it
                pass

    def get_index_status(self, timeout=120, index_map=None):
        # Parse /indexStatus through RestParser into an index map.
        api = self.baseUrl + 'indexStatus'
        index_map = {}
        status, content, header = self._http_request(api, timeout=timeout)
        if status:
            json_parsed = json.loads(content)
            index_map = RestParser().parse_index_status_response(json_parsed)
        return index_map

    def get_index_id_map(self, timeout=120):
        # Build {bucket: {index: {'id': id}}} from /indexStatus.
        api = self.baseUrl + 'indexStatus'
        index_map = {}
        status, content, header = self._http_request(api, timeout=timeout)
        if status:
            json_parsed = json.loads(content)
            for map in json_parsed["indexes"]:
                bucket_name = map['bucket']
                if bucket_name not in list(index_map.keys()):
                    index_map[bucket_name] = {}
                index_name = map['index']
                index_map[bucket_name][index_name] = {}
                index_map[bucket_name][index_name]['id'] = map['id']
        return index_map

    def get_index_statements(self, timeout=120):
        # Return the CREATE INDEX statements known to the indexer.
        api = self.index_baseUrl + 'getIndexStatement'
        index_map = {}
        status, content, header = self._http_request(api, timeout=timeout)
        if status:
            json_parsed = json.loads(content)
            return json_parsed

    # returns node data for this host
    def get_nodes_self(self, timeout=120):
        node = None
        api = self.baseUrl + 'nodes/self'
        status, content, header = self._http_request(api, timeout=timeout)
        if status:
            json_parsed = json.loads(content)
            node = RestParser().parse_get_nodes_response(json_parsed)
        return node

    def get_ip_from_ini_file(self):
        """ in alternate address, we need to get hostname from ini file """
        return self.ip

    def node_statuses(self, timeout=120):
        # Build a list of OtpNode objects from /nodeStatuses.
        nodes = []
        api = self.baseUrl + 'nodeStatuses'
        status, content, header = self._http_request(api, timeout=timeout)
        json_parsed = json.loads(content)
        if status:
            for key in json_parsed:
                # each key contain node info
                value = json_parsed[key]
                # Create an OtpNode object given the id and status.
                # Note the OtpNode object grabs the ip address from the id.
                node = OtpNode(id=value['otpNode'],
                               status=value['status'])
                if node.ip == 'cb.local':
                    node.ip = self.ip
                    node.id = node.id.replace('cb.local',
                                              self.ip.__str__())
                # The ip address grabbed from the id is '127.0.0.1' or '::1'
                # when the node is not part of a cluster. This can be amended
                # to the ip address in the TestInputServer object that is
                # provided.
if node.ip in ['127.0.0.1', '[::1]']: node.ip = self.ip node.port = int(key[key.rfind(":") + 1:]) node.replication = value['replication'] if 'gracefulFailoverPossible' in list(value.keys()): node.gracefulFailoverPossible = value['gracefulFailoverPossible'] else: node.gracefulFailoverPossible = False nodes.append(node) return nodes def cluster_status(self): parsed = {} api = self.baseUrl + 'pools/default' status, content, header = self._http_request(api) if status: parsed = json.loads(content) return parsed def fetch_vbucket_map(self, bucket="default"): """Return vbucket map for bucket Keyword argument: bucket -- bucket name """ api = self.baseUrl + 'pools/default/buckets/' + bucket status, content, header = self._http_request(api) _stats = json.loads(content) return _stats['vBucketServerMap']['vBucketMap'] def get_vbucket_map_and_server_list(self, bucket="default"): """ Return server list, replica and vbuckets map that matches to server list """ vbucket_map = self.fetch_vbucket_map(bucket) api = self.baseUrl + 'pools/default/buckets/' + bucket status, content, header = self._http_request(api) _stats = json.loads(content) num_replica = _stats['vBucketServerMap']['numReplicas'] vbucket_map = _stats['vBucketServerMap']['vBucketMap'] servers = _stats['vBucketServerMap']['serverList'] server_list = [] for node in servers: node = node.split(":") server_list.append(node[0]) return vbucket_map, server_list, num_replica def get_pools_info(self): parsed = {} api = self.baseUrl + 'pools' status, content, header = self._http_request(api) json_parsed = json.loads(content) if status: parsed = json_parsed return parsed def get_pools_default(self, query='', timeout=30): parsed = {} api = self.baseUrl + 'pools/default' if query: api += "?" 
+ query status, content, header = self._http_request(api, timeout=timeout) json_parsed = json.loads(content) if status: parsed = json_parsed return parsed def get_cluster_stats(self): """ Reads cluster nodes statistics using `pools/default` rest GET method :return stat_dict - Dictionary of CPU & Memory status each cluster node: """ stat_dict = dict() json_output = self.get_pools_default() if 'nodes' in json_output: for node_stat in json_output['nodes']: stat_dict[node_stat['hostname']] = dict() stat_dict[node_stat['hostname']]['services'] = node_stat['services'] stat_dict[node_stat['hostname']]['cpu_utilization'] = node_stat['systemStats']['cpu_utilization_rate'] stat_dict[node_stat['hostname']]['mem_free'] = node_stat['systemStats']['mem_free'] stat_dict[node_stat['hostname']]['mem_total'] = node_stat['systemStats']['mem_total'] stat_dict[node_stat['hostname']]['swap_mem_used'] = node_stat['systemStats']['swap_used'] stat_dict[node_stat['hostname']]['swap_mem_total'] = node_stat['systemStats']['swap_total'] return stat_dict def get_pools(self): version = None api = self.baseUrl + 'pools' status, content, header = self._http_request(api) json_parsed = json.loads(content) if status: version = MembaseServerVersion(json_parsed['implementationVersion'], json_parsed['componentsVersion']) return version def get_buckets(self, num_retries=3, poll_interval=15): buckets = [] api = '{0}{1}'.format(self.baseUrl, 'pools/default/buckets?basic_stats=true') buckets_are_received = False status = "" content = "" while num_retries > 0: try: # get all the buckets status, content, header = self._http_request(api) json_parsed = json.loads(content) if status: for item in json_parsed: bucketInfo = RestParser().parse_get_bucket_json(item) buckets.append(bucketInfo) buckets_are_received = True break else: log.error("Response status is: False, response content is: {0}".format(content)) num_retries -= 1 time.sleep(poll_interval) except Exception as e: num_retries -= 1 log.error(e) 
                log.error('{0} seconds sleep before calling get_buckets again...'.format(poll_interval))
                time.sleep(poll_interval)
        # (tail of get_buckets) report the final failure, if any
        if not buckets_are_received:
            log.error("Could not get buckets list from the following api: {0}".format(api))
            log.error("Last response status is: {0}".format(status))
            log.error("Last response content is: {0}".format(content))
        return buckets

    def get_bucket_by_name(self,bucket_name):
        # get all the buckets, keep only the one(s) whose name matches
        buckets = []
        api = '{0}{1}'.format(self.baseUrl, 'pools/default/buckets?basic_stats=true')
        status, content, header = self._http_request(api)
        json_parsed = json.loads(content)
        if status:
            for item in json_parsed:
                bucketInfo = RestParser().parse_get_bucket_json(item)
                if bucketInfo.name == bucket_name:
                    buckets.append(bucketInfo)
        return buckets

    def get_buckets_itemCount(self):
        # get all the buckets; map bucket name -> item count
        bucket_map = {}
        api = '{0}{1}'.format(self.baseUrl, 'pools/default/buckets?basic_stats=true')
        status, content, header = self._http_request(api)
        json_parsed = json.loads(content)
        if status:
            for item in json_parsed:
                bucketInfo = RestParser().parse_get_bucket_json(item)
                bucket_map[bucketInfo.name] = bucketInfo.stats.itemCount
        return bucket_map

    def get_bucket_stats_for_node(self, bucket='default', node=None):
        # Return the latest sample of every stat for *bucket* on *node*.
        if not node:
            log.error('node_ip not specified')
            return None
        stats = {}
        api = "{0}{1}{2}{3}{4}:{5}{6}".format(self.baseUrl, 'pools/default/buckets/',
                                              bucket, "/nodes/", node.ip, node.port, "/stats")
        status, content, header = self._http_request(api)
        if status:
            json_parsed = json.loads(content)
            op = json_parsed["op"]
            samples = op["samples"]
            for stat_name in samples:
                if stat_name not in stats:
                    if len(samples[stat_name]) == 0:
                        stats[stat_name] = []
                    else:
                        stats[stat_name] = samples[stat_name][-1]
                else:
                    raise Exception("Duplicate entry in the stats command {0}".format(stat_name))
        return stats

    def get_node_settings(self, setting_name=None):
        # Look up a single option from the fts manager options map.
        api = "{0}{1}".format(self.fts_baseUrl, 'api/manager')
        status, content, header = self._http_request(api)
        json_parsed = json.loads(content)
        options_vals = json_parsed['mgr']['options']
        if setting_name in options_vals.keys():
            return options_vals[setting_name]
        # falls through returning None when the setting is absent
        log.error("Setting {0} not available".format(setting_name))

    def get_bucket_status(self, bucket):
        # Return the status of the first node hosting *bucket*, else None.
        if not bucket:
            log.error("Bucket Name not Specified")
            return None
        api = self.baseUrl + 'pools/default/buckets'
        status, content, header = self._http_request(api)
        if status:
            json_parsed = json.loads(content)
            for item in json_parsed:
                if item["name"] == bucket:
                    return item["nodes"][0]["status"]
            log.error("Bucket {0} doesn't exist".format(bucket))
            return None

    def fetch_bucket_stats(self, bucket='default', zoom='minute'):
        """Return deserialized buckets stats.

        Keyword argument:
        bucket -- bucket name
        zoom -- stats zoom level (minute | hour | day | week | month | year)
        """
        api = self.baseUrl + 'pools/default/buckets/{0}/stats?zoom={1}'.format(bucket, zoom)
        log.info(api)
        status, content, header = self._http_request(api)
        return json.loads(content)

    def set_query_index_api_mode(self, index_api_mode=3):
        # POST the max-index-api setting to the query admin endpoint.
        api = self.query_baseUrl + 'admin/settings'
        query_api_setting = {"max-index-api": index_api_mode}
        status, content, header = self._http_request(api, 'POST', json.dumps(query_api_setting))
        if not status:
            raise Exception(content)
        log.info("{0} set".format(query_api_setting))

    def fetch_bucket_xdcr_stats(self, bucket='default', zoom='minute'):
        """Return deserialized bucket xdcr stats.
Keyword argument: bucket -- bucket name zoom -- stats zoom level (minute | hour | day | week | month | year) """ api = self.baseUrl + 'pools/default/buckets/@xdcr-{0}/stats?zoom={1}'.format(bucket, zoom) status, content, header = self._http_request(api) return json.loads(content) def fetch_system_stats(self): """Return deserialized system stats.""" api = self.baseUrl + 'pools/default/' status, content, header = self._http_request(api) return json.loads(content) def get_xdc_queue_size(self, bucket): """Fetch bucket stats and return the latest value of XDC replication queue size""" bucket_stats = self.fetch_bucket_xdcr_stats(bucket) return bucket_stats['op']['samples']['replication_changes_left'][-1] def get_dcp_queue_size(self, bucket): """Fetch bucket stats and return the latest value of DCP queue size""" bucket_stats = self.fetch_bucket_stats(bucket) return bucket_stats['op']['samples']['ep_dcp_xdcr_items_remaining'][-1] def get_active_key_count(self, bucket): """Fetch bucket stats and return the bucket's curr_items count""" bucket_stats = self.fetch_bucket_stats(bucket) ret_val = -1 retries = 10 while retries > 0: try: ret_val = bucket_stats['op']['samples']['curr_items'][-1] return ret_val except KeyError as err: log.error(f"get_active_key_count() function for bucket {bucket} reported an error {err}") log.error(f"Corresponding bucket stats JSON is {bucket_stats}") time.sleep(2) retries = retries - 1 return ret_val def get_replica_key_count(self, bucket): """Fetch bucket stats and return the bucket's replica count""" bucket_stats = self.fetch_bucket_stats(bucket) return bucket_stats['op']['samples']['vb_replica_curr_items'][-1] def get_nodes(self, get_all_nodes=False): nodes = [] api = self.baseUrl + 'pools/default' status, content, header = self._http_request(api) count = 0 while not content and count < 7: log.info("sleep 5 seconds and retry") time.sleep(5) status, content, header = self._http_request(api) count += 1 if count == 7: raise Exception("could not get 
node info after 30 seconds") json_parsed = json.loads(content) if status: if "nodes" in json_parsed: for json_node in json_parsed["nodes"]: node = RestParser().parse_get_nodes_response(json_node) node.rest_username = self.username node.rest_password = self.password if node.ip == "127.0.0.1": node.ip = self.ip # Only add nodes which are active on cluster if get_all_nodes or node.clusterMembership == 'active': nodes.append(node) else: log.info("Node {0} not part of cluster {1}".format(node.ip, node.clusterMembership)) return nodes # this method returns the number of node in cluster def get_cluster_size(self): nodes = self.get_nodes() node_ip = [] for node in nodes: node_ip.append(node.ip) log.info("Number of node(s) in cluster is {0} node(s)".format(len(node_ip))) return len(node_ip) """ this medthod return version on node that is not initialized yet """ def get_nodes_version(self): node = self.get_nodes_self() version = node.version log.info("Node version in cluster {0}".format(version)) return version # this method returns the versions of nodes in cluster def get_nodes_versions(self, logging=True): nodes = self.get_nodes() versions = [] for node in nodes: versions.append(node.version) if logging: log.info("Node versions in cluster {0}".format(versions)) return versions def get_major_version(self): """ Returns the major version of the node (e.g. 6.5) """ return self.get_nodes_self().major_version def check_cluster_compatibility(self, version): """ Check if all nodes in cluster are of versions equal or above the version required. :param version: Version to check the cluster compatibility for. Should be of format major_ver.minor_ver. For example: 5.0, 4.5, 5.1 :return: True if cluster is compatible with the version specified, False otherwise. Return None if cluster is uninitialized. """ nodes = self.get_nodes() if not nodes: # If nodes returned is None, it means that the cluster is not initialized yet and hence cluster # compatibility cannot be found. 
Return None return None major_ver, minor_ver = version.split(".") compatibility = int(major_ver) * 65536 + int(minor_ver) is_compatible = True for node in nodes: clusterCompatibility = int(node.clusterCompatibility) if clusterCompatibility < compatibility: is_compatible = False return is_compatible # this method returns the services of nodes in cluster - implemented for Sherlock def get_nodes_services(self): nodes = self.get_nodes() map = {} for node in nodes: key = "{0}:{1}".format(node.ip, node.port) map[key] = node.services return map # Check node version def check_node_versions(self, check_version="4.0"): versions = self.get_nodes_versions() if versions[0] < check_version: return False return True def get_bucket_stats(self, bucket='default'): stats = {} status, json_parsed = self.get_bucket_stats_json(bucket) if status: op = json_parsed["op"] samples = op["samples"] for stat_name in samples: if samples[stat_name]: last_sample = len(samples[stat_name]) - 1 if last_sample: stats[stat_name] = samples[stat_name][last_sample] return stats def get_fts_stats(self, index_name=None, bucket_name=None, stat_name=None): """ List of fts stats available as of 03/16/2017 - default:default_idx3:avg_queries_latency: 0, default:default_idx3:batch_merge_count: 0, default:default_idx3:doc_count: 0, default:default_idx3:iterator_next_count: 0, default:default_idx3:iterator_seek_count: 0, default:default_idx3:num_bytes_live_data: 0, default:default_idx3:num_bytes_used_disk: 0, default:default_idx3:num_mutations_to_index: 0, default:default_idx3:num_pindexes: 0, default:default_idx3:num_pindexes_actual: 0, default:default_idx3:num_pindexes_target: 0, default:default_idx3:num_recs_to_persist: 0, default:default_idx3:reader_get_count: 0, default:default_idx3:reader_multi_get_count: 0, default:default_idx3:reader_prefix_iterator_count: 0, default:default_idx3:reader_range_iterator_count: 0, default:default_idx3:timer_batch_store_count: 0, default:default_idx3:timer_data_delete_count: 0, 
default:default_idx3:timer_data_update_count: 0, default:default_idx3:timer_opaque_get_count: 0, default:default_idx3:timer_opaque_set_count: 0, default:default_idx3:timer_rollback_count: 0, default:default_idx3:timer_snapshot_start_count: 0, default:default_idx3:total_bytes_indexed: 0, default:default_idx3:total_bytes_query_results: 0, default:default_idx3:total_compactions: 0, default:default_idx3:total_queries: 0, default:default_idx3:total_queries_error: 0, default:default_idx3:total_queries_slow: 0, default:default_idx3:total_queries_timeout: 0, default:default_idx3:total_request_time: 0, default:default_idx3:total_term_searchers: 0, default:default_idx3:writer_execute_batch_count: 0, :param index_name: name of the index :param bucket_name: source bucket :param stat_name: any of the above :return: """ api = "{0}{1}".format(self.fts_baseUrl, 'api/nsstats') attempts = 0 while attempts < 5: status, content, header = self._http_request(api) json_parsed = json.loads(content) if bucket_name is None and index_name is None and stat_name is None: return status, content if bucket_name is None and index_name is None: key = stat_name else: key = bucket_name+':'+index_name+':'+stat_name if key in json_parsed: return status, json_parsed[key] attempts += 1 log.info("Stat {0} not available yet".format(stat_name)) time.sleep(1) log.error("ERROR: Stat {0} error on {1} on bucket {2}". 
format(stat_name, index_name, bucket_name)) def start_fts_index_compaction(self, index_name): api = "{0}{1}".format(self.fts_baseUrl, f'api/index/{index_name}/tasks') params = {"op": "merge"} status, content, header = self._http_request(api, method='POST', params=json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers(), timeout=30) json_parsed = json.loads(content) return status, json_parsed def get_fts_index_compactions(self, index_name): api = "{0}{1}".format(self.fts_baseUrl, f'api/index/{index_name}/tasks') params = {"op": "get"} status, content, header = self._http_request(api, method='POST', params=json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers(), timeout=30) json_parsed = json.loads(content) return status, json_parsed def cancel_fts_index_compaction(self, index_name=None, uuid=None): api = "{0}{1}".format(self.fts_baseUrl, f'api/index/{index_name}/tasks') params = {"op": "cancel", "uuid": uuid} status, content, header = self._http_request(api, method='POST', params=json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers(), timeout=30) json_parsed = json.loads(content) return status, json_parsed def get_bucket_stats_json(self, bucket='default'): stats = {} api = "{0}{1}{2}{3}".format(self.baseUrl, 'pools/default/buckets/', bucket, "/stats") if isinstance(bucket, Bucket): api = '{0}{1}{2}{3}'.format(self.baseUrl, 'pools/default/buckets/', bucket.name, "/stats") status, content, header = self._http_request(api) json_parsed = json.loads(content) return status, json_parsed def get_bucket_json(self, bucket='default'): api = '{0}{1}{2}'.format(self.baseUrl, 'pools/default/buckets/', bucket) if isinstance(bucket, Bucket): api = '{0}{1}{2}'.format(self.baseUrl, 'pools/default/buckets/', bucket.name) status, content, header = self._http_request(api) if not status: raise GetBucketInfoFailed(bucket, content) return json.loads(content) def get_bucket_maxTTL(self, bucket='default'): bucket_info = 
self.get_bucket_json(bucket=bucket) return bucket_info['maxTTL'] def get_bucket_compressionMode(self, bucket='default'): bucket_info = self.get_bucket_json(bucket=bucket) info = self.get_nodes_self() if 5.5 > float(info.version[:3]): bucket_info['compressionMode'] = "off" return bucket_info['compressionMode'] def is_lww_enabled(self, bucket='default'): bucket_info = self.get_bucket_json(bucket=bucket) try: if bucket_info['conflictResolutionType'] == 'lww': return True except KeyError: return False def get_bucket(self, bucket='default', num_attempt=1, timeout=1): bucketInfo = None try: bucket = bucket.decode() except AttributeError: pass api = '%s%s%s?basic_stats=true' % (self.baseUrl, 'pools/default/buckets/', bucket) if isinstance(bucket, Bucket): api = '%s%s%s?basic_stats=true' % (self.baseUrl, 'pools/default/buckets/', bucket.name) status, content, header = self._http_request(api) num = 1 while not status and num_attempt > num: log.error("try to get {0} again after {1} sec".format(api, timeout)) time.sleep(timeout) status, content, header = self._http_request(api) num += 1 if status: bucketInfo = RestParser().parse_get_bucket_response(content) return bucketInfo def get_vbuckets(self, bucket='default'): b = self.get_bucket(bucket) return None if not b else b.vbuckets def delete_bucket(self, bucket='default', num_retries=3, poll_interval=5): api = '%s%s%s' % (self.baseUrl, 'pools/default/buckets/', bucket) if isinstance(bucket, Bucket): api = '%s%s%s' % (self.baseUrl, 'pools/default/buckets/', bucket.name) status = False while num_retries > 0: try: status, content, header = self._http_request(api, 'DELETE') if int(header['status']) == 500: # According to http://docs.couchbase.com/couchbase-manual-2.5/cb-rest-api/#deleting-buckets # the cluster will return with 500 if it failed to nuke # the bucket on all of the nodes within 30 secs log.warning("Bucket deletion timed out waiting for all nodes, retrying...") num_retries -= 1 time.sleep(poll_interval) else: break 
except Exception as e: num_retries -= 1 log.error(e) log.error('{0} seconds sleep before calling delete_bucket again...'.format(poll_interval)) time.sleep(poll_interval) return status def delete_all_buckets(self): buckets = self.get_buckets() for bucket in buckets: if isinstance(bucket, Bucket): api = '%s%s%s' % (self.baseUrl, 'pools/default/buckets/', bucket.name) self._http_request(api, 'DELETE') '''Load any of the three sample buckets''' def load_sample(self, sample_name, poll_interval=3, max_wait_time=1200, max_error_retries=3): api = '{0}{1}'.format(self.baseUrl, "sampleBuckets/install") data = '["{0}"]'.format(sample_name) status, content, header = self._http_request(api, 'POST', data) # Allow the sample bucket to be loaded self.wait_until_bucket_loaded(sample_name, poll_interval, max_wait_time, max_error_retries) return status def wait_until_bucket_loaded(self, bucket_name, poll_interval=3, max_wait_time=1200, max_error_retries=3): max_time = time.time() + float(max_wait_time) is_bucket_loaded = False response = "" api = '{0}{1}'.format(self.baseUrl, "pools/default/buckets/{}".format(bucket_name)) previous_doc_count = 0 while time.time() < max_time and max_error_retries > 0: time.sleep(poll_interval) status, content, response = self._http_request(api, method='GET') data = json.loads(content) current_doc_count = int(data["basicStats"]["itemCount"]) if status: if current_doc_count == previous_doc_count: is_bucket_loaded = True break else: previous_doc_count = current_doc_count else: max_error_retries -= 1 log.warning("Something wrong happened while getting bucket {0} items count, retrying.".format(bucket_name)) log.warning("Server response is {0}".format(str(response))) if not is_bucket_loaded: log.error("Bucket {0} was not loaded completely") log.error("Last response is: {0}".format(str(response))) # figure out the proxy port def create_bucket(self, bucket='', ramQuotaMB=1, replicaNumber=1, proxyPort=11211, bucketType='membase', replica_index=1, 
threadsNumber=3, flushEnabled=1, evictionPolicy='valueOnly', lww=False, maxTTL=None, compressionMode='passive', storageBackend='couchstore'): api = '{0}{1}'.format(self.baseUrl, 'pools/default/buckets') params = urllib.parse.urlencode({}) init_params = {'name': bucket, 'ramQuotaMB': ramQuotaMB, 'replicaNumber': replicaNumber, # 'proxyPort': proxyPort, 'bucketType': bucketType, 'replicaIndex': replica_index, 'threadsNumber': threadsNumber, 'flushEnabled': flushEnabled, 'evictionPolicy': evictionPolicy} if bucketType == "memcached": log.info("Create memcached bucket") # 'replicaNumber' is not valid for memcached buckets init_params.pop("replicaNumber", None) if lww: init_params['conflictResolutionType'] = 'lww' if maxTTL: init_params['maxTTL'] = maxTTL if compressionMode and self.is_enterprise_edition(): init_params['compressionMode'] = compressionMode if bucketType == 'ephemeral': del init_params['replicaIndex'] # does not apply to ephemeral buckets, and is even rejected # bucket storage is applicable only for membase bucket if bucketType == "membase": init_params['storageBackend'] = storageBackend pre_spock = not self.check_cluster_compatibility("5.0") if pre_spock: init_params['proxyPort'] = proxyPort params = urllib.parse.urlencode(init_params) log.info("{0} with param: {1}".format(api, params)) create_start_time = time.time() maxwait = 60 for numsleep in range(maxwait): status, content, header = self._http_request(api, 'POST', params) if status: break elif (int(header['status']) == 503 and '{"_":"Bucket with given name still exists"}'.encode('utf-8') in content): log.info("The bucket still exists, sleep 1 sec and retry") time.sleep(1) else: raise BucketCreationException(ip=self.ip, bucket_name=bucket) if (numsleep + 1) == maxwait: log.error("Tried to create the bucket for {0} secs.. giving up". 
format(maxwait)) raise BucketCreationException(ip=self.ip, bucket_name=bucket) create_time = time.time() - create_start_time log.info("{0:.02f} seconds to create bucket {1}". format(round(create_time, 2), bucket)) return status def change_bucket_props(self, bucket, ramQuotaMB=None, replicaNumber=None, proxyPort=None, replicaIndex=None, flushEnabled=None, timeSynchronization=None, maxTTL=None, compressionMode=None): api = '{0}{1}{2}'.format(self.baseUrl, 'pools/default/buckets/', bucket) if isinstance(bucket, Bucket): api = '{0}{1}{2}'.format(self.baseUrl, 'pools/default/buckets/', bucket.name) params = urllib.parse.urlencode({}) params_dict = {} existing_bucket = self.get_bucket_json(bucket) if ramQuotaMB: params_dict["ramQuotaMB"] = ramQuotaMB if replicaNumber: params_dict["replicaNumber"] = replicaNumber #if proxyPort: # params_dict["proxyPort"] = proxyPort if replicaIndex: params_dict["replicaIndex"] = replicaIndex if flushEnabled: params_dict["flushEnabled"] = flushEnabled if timeSynchronization: params_dict["timeSynchronization"] = timeSynchronization if maxTTL: params_dict["maxTTL"] = maxTTL if compressionMode and self.is_enterprise_edition(): params_dict["compressionMode"] = compressionMode params = urllib.parse.urlencode(params_dict) log.info("%s with param: %s" % (api, params)) status, content, header = self._http_request(api, 'POST', params) if timeSynchronization: if status: raise Exception("Erroneously able to set bucket settings %s for bucket on time-sync" % (params, bucket)) return status, content if not status: raise Exception("Unable to set bucket settings %s for bucket" % (params, bucket)) log.info("bucket %s updated" % bucket) return status # return AutoFailoverSettings def get_autofailover_settings(self): settings = None api = self.baseUrl + 'settings/autoFailover' status, content, header = self._http_request(api) json_parsed = json.loads(content) if status: settings = AutoFailoverSettings() settings.enabled = json_parsed["enabled"] 
settings.count = json_parsed["count"] settings.timeout = json_parsed["timeout"] settings.failoverOnDataDiskIssuesEnabled = json_parsed["failoverOnDataDiskIssues"]["enabled"] settings.failoverOnDataDiskIssuesTimeout = json_parsed["failoverOnDataDiskIssues"]["timePeriod"] settings.maxCount = json_parsed["maxCount"] settings.failoverServerGroup = json_parsed["failoverServerGroup"] if json_parsed["canAbortRebalance"]: settings.can_abort_rebalance = json_parsed["canAbortRebalance"] return settings def update_autofailover_settings(self, enabled, timeout, canAbortRebalance=False, enable_disk_failure=False, disk_timeout=120, maxCount=1, enableServerGroup=False): params_dict = {} params_dict['timeout'] = timeout if enabled: params_dict['enabled'] = 'true' else: params_dict['enabled'] = 'false' if canAbortRebalance: params_dict['canAbortRebalance'] = 'true' if enable_disk_failure: params_dict['failoverOnDataDiskIssues[enabled]'] = 'true' params_dict['failoverOnDataDiskIssues[timePeriod]'] = disk_timeout else: params_dict['failoverOnDataDiskIssues[enabled]'] = 'false' params_dict['maxCount'] = maxCount if enableServerGroup: params_dict['failoverServerGroup'] = 'true' else: params_dict['failoverServerGroup'] = 'false' params = urllib.parse.urlencode(params_dict) api = self.baseUrl + 'settings/autoFailover' log.info('settings/autoFailover params : {0}'.format(params)) status, content, header = self._http_request(api, 'POST', params) if not status: log.warning('''failed to change autofailover_settings! See MB-7282. Workaround: wget --user=Administrator --password=asdasd --post-data='rpc:call(mb_master:master_node(), erlang, apply ,[fun () -> erlang:exit(erlang:whereis(mb_master), kill) end, []]).' 
http://localhost:8091/diag/eval''') return status # return AutoReprovisionSettings def get_autoreprovision_settings(self): settings = None api = self.baseUrl + 'settings/autoReprovision' status, content, header = self._http_request(api) json_parsed = json.loads(content) if status: settings = AutoReprovisionSettings() settings.enabled = json_parsed["enabled"] settings.count = json_parsed["count"] settings.max_nodes = json_parsed["max_nodes"] return settings def update_autoreprovision_settings(self, enabled, maxNodes=1): if enabled: params = urllib.parse.urlencode({'enabled': 'true', 'maxNodes': maxNodes}) else: params = urllib.parse.urlencode({'enabled': 'false', 'maxNodes': maxNodes}) api = self.baseUrl + 'settings/autoReprovision' log.info('settings/autoReprovision params : {0}'.format(params)) status, content, header = self._http_request(api, 'POST', params) if not status: log.error('failed to change autoReprovision_settings!') return status def reset_autofailover(self): api = self.baseUrl + 'settings/autoFailover/resetCount' status, content, header = self._http_request(api, 'POST', '') return status def reset_autoreprovision(self): api = self.baseUrl + 'settings/autoReprovision/resetCount' status, content, header = self._http_request(api, 'POST', '') return status def set_alerts_settings(self, recipients, sender, email_username, email_password, email_host='localhost', email_port=25, email_encrypt='false', alerts='auto_failover_node,auto_failover_maximum_reached'): api = self.baseUrl + 'settings/alerts' params = urllib.parse.urlencode({'enabled': 'true', 'recipients': recipients, 'sender': sender, 'emailUser': email_username, 'emailPass': email_password, 'emailHost': email_host, 'emailPort': email_port, 'emailEncrypt': email_encrypt, 'alerts': alerts}) log.info('settings/alerts params : {0}'.format(params)) status, content, header = self._http_request(api, 'POST', params) return status def get_alerts_settings(self): api = self.baseUrl + 'settings/alerts' status, 
content, header = self._http_request(api) json_parsed = json.loads(content) if not status: raise Exception("unable to get autofailover alerts settings") return json_parsed def disable_alerts(self): api = self.baseUrl + 'settings/alerts' params = urllib.parse.urlencode({'enabled': 'false'}) log.info('settings/alerts params : {0}'.format(params)) status, content, header = self._http_request(api, 'POST', params) return status def set_cas_drift_threshold(self, bucket, ahead_threshold_in_millisecond, behind_threshold_in_millisecond): api = self.baseUrl + 'pools/default/buckets/{0}'. format( bucket ) params_dict ={'driftAheadThresholdMs': ahead_threshold_in_millisecond, 'driftBehindThresholdMs': behind_threshold_in_millisecond} params = urllib.parse.urlencode(params_dict) log.info("%s with param: %s" % (api, params)) status, content, header = self._http_request(api, 'POST', params) return status def stop_rebalance(self, wait_timeout=10): api = self.baseUrl + '/controller/stopRebalance' status, content, header = self._http_request(api, 'POST') if status: for i in range(int(wait_timeout)): if self._rebalance_progress_status() == 'running': log.warning("rebalance is not stopped yet after {0} sec".format(i + 1)) time.sleep(1) status = False else: log.info("rebalance was stopped") status = True break else: log.error("Rebalance is not stopped due to {0}".format(content)) return status def set_data_path(self, data_path=None, index_path=None, cbas_path=None): end_point = '/nodes/self/controller/settings' api = self.baseUrl + end_point paths = HTTPHeaderDict() set_path = False if data_path: set_path = True paths.add('path', data_path) if index_path: set_path = True paths.add('index_path', index_path) if cbas_path: set_path = True import ast for cbas in ast.literal_eval(cbas_path): paths.add('cbas_path', cbas) if set_path: params = urllib.parse.urlencode(paths) log.info('%s : %s' % (end_point, params)) status, content, header = self._http_request(api, 'POST', params) if status: 
log.info("Setting data_path: {0}: status {1}".format(data_path, status)) else: log.error("Unable to set data_path {0} : {1}".format(data_path, content)) return status def get_database_disk_size(self, bucket='default'): api = self.baseUrl + "pools/{0}/buckets".format(bucket) status, content, header = self._http_request(api) json_parsed = json.loads(content) # disk_size in MB disk_size = (json_parsed[0]["basicStats"]["diskUsed"]) // (1024 * 1024) return status, disk_size def ddoc_compaction(self, design_doc_id, bucket="default"): api = self.baseUrl + "pools/default/buckets/%s/ddocs/%s/controller/compactView" % \ (bucket, design_doc_id) status, content, header = self._http_request(api, 'POST') if not status: raise CompactViewFailed(design_doc_id, content) log.info("compaction for ddoc '%s' was triggered" % design_doc_id) def check_compaction_status(self, bucket_name): tasks = self.active_tasks() if "error" in tasks: raise Exception(tasks) for task in tasks: log.info("Task is {0}".format(task)) if task["type"] == "bucket_compaction": if task["bucket"] == bucket_name: return True, task["progress"] return False, None def change_memcached_t_option(self, value): cmd = '[ns_config:update_key({node, N, memcached}, fun (PList)' + \ ' -> lists:keystore(verbosity, 1, PList, {verbosity, \'-t ' + str(value) + '\'}) end)' + \ ' || N <- ns_node_disco:nodes_wanted()].' return self.diag_eval(cmd) def set_ensure_full_commit(self, value): """Dynamic settings changes""" # the boolean paramter is used to turn on/off ensure_full_commit(). In XDCR, # issuing checkpoint in this function is expensive and not necessary in some # test, turning off this function would speed up some test. The default value # is ON. 
cmd = 'ns_config:set(ensure_full_commit_enabled, {0}).'.format(value) return self.diag_eval(cmd) def get_internalSettings(self, param): """allows to get internalSettings values for: indexAwareRebalanceDisabled, rebalanceIndexWaitingDisabled, rebalanceIndexPausingDisabled, maxParallelIndexers, maxParallelReplicaIndexers, maxBucketCount""" api = self.baseUrl + "internalSettings" status, content, header = self._http_request(api) json_parsed = json.loads(content) param = json_parsed[param] return param def set_internalSetting(self, param, value): "Set any internal setting" api = self.baseUrl + "internalSettings" if isinstance(value, bool): value = str(value).lower() params = urllib.parse.urlencode({param : value}) status, content, header = self._http_request(api, "POST", params) log.info('Update internal setting {0}={1}'.format(param, value)) return status def get_replication_for_buckets(self, src_bucket_name, dest_bucket_name): replications = self.get_replications() for replication in replications: if src_bucket_name in replication['source'] and \ replication['target'].endswith(dest_bucket_name): return replication raise XDCRException("Replication with Src bucket: {0} and Target bucket: {1} not found". format(src_bucket_name, dest_bucket_name)) """ By default, these are the global replication settings - { optimisticReplicationThreshold:256, workerBatchSize:500, failureRestartInterval:1, docBatchSizeKb":2048, checkpointInterval":1800, maxConcurrentReps":32} You can override these using set_xdcr_param() """ def set_xdcr_param(self, src_bucket_name, dest_bucket_name, param, value): replication = self.get_replication_for_buckets(src_bucket_name, dest_bucket_name) api = self.baseUrl[:-1] + replication['settingsURI'] value = str(value).lower() params = urllib.parse.urlencode({param: value}) status, content, header = self._http_request(api, "POST", params) if not status: raise XDCRException("Unable to set replication setting {0}={1} on bucket {2} on node {3}". 
format(param, value, src_bucket_name, self.ip)) else: log.info("Updated {0}={1} on bucket '{2}' on {3}".format(param, value, src_bucket_name, self.ip)) def set_xdcr_params(self, src_bucket_name, dest_bucket_name, param_value_map): replication = self.get_replication_for_buckets(src_bucket_name, dest_bucket_name) api = self.baseUrl[:-1] + replication['settingsURI'] params = urllib.parse.urlencode(param_value_map) status, content, header = self._http_request(api, "POST", params) if not status: raise XDCRException("{0} \n Unable to set replication settings {1} on bucket {2} on node {3}". format(content, param_value_map, src_bucket_name, self.ip)) else: log.info("Updated {0} on bucket '{1}' on {2}".format(param_value_map, src_bucket_name, self.ip)) def set_global_xdcr_param(self, param, value): api = self.baseUrl[:-1] + "/settings/replications" value = str(value).lower() params = urllib.parse.urlencode({param: value}) status, _, _ = self._http_request(api, "POST", params) if not status: raise XDCRException("Unable to set replication setting {0}={1} on node {2}". format(param, value, self.ip)) log.info("Updated {0}={1} on {2}".format(param, value, self.ip)) # Gets per-replication setting value def get_xdcr_param(self, src_bucket_name, dest_bucket_name, param): replication = self.get_replication_for_buckets(src_bucket_name, dest_bucket_name) api = self.baseUrl[:-1] + replication['settingsURI'] status, content, _ = self._http_request(api) if not status: raise XDCRException("Unable to get replication setting {0} on bucket {1} on node {2}". 
format(param, src_bucket_name, self.ip)) json_parsed = json.loads(content) # when per-replication settings match global(internal) settings, # the param is not returned by rest API # in such cases, return internalSetting value for the param try: return json_parsed[param] except KeyError: if param == 'pauseRequested': return False else: param = 'xdcr' + param[0].upper() + param[1:] log.info("Trying to fetch xdcr param:{0} from global settings". format(param)) return self.get_internalSettings(param) # Returns a boolean value on whether replication def is_replication_paused(self, src_bucket_name, dest_bucket_name): return self.get_xdcr_param(src_bucket_name, dest_bucket_name, 'pauseRequested') def is_replication_paused_by_id(self, repl_id): repl_id = repl_id.replace('/', '%2F') api = self.baseUrl + 'settings/replications/' + repl_id status, content, header = self._http_request(api) if not status: raise XDCRException("Unable to retrieve pause resume status for replication {0}". format(repl_id)) repl_stats = json.loads(content) return repl_stats['pauseRequested'] def pause_resume_repl_by_id(self, repl_id, param, value): repl_id = repl_id.replace('/', '%2F') api = self.baseUrl + 'settings/replications/' + repl_id params = urllib.parse.urlencode({param: value}) status, _, _ = self._http_request(api, "POST", params) if not status: raise XDCRException("Unable to update {0}={1} setting for replication {2}". format(param, value, repl_id)) log.info("Updated {0}={1} on {2}".format(param, value, repl_id)) def get_recent_xdcr_vb_ckpt(self, repl_id): command = 'ns_server_testrunner_api:grab_all_goxdcr_checkpoints().' 
status, content = self.diag_eval(command, print_log=False) if not status: raise Exception("Unable to get recent XDCR checkpoint information") repl_ckpt_list = json.loads(content) # a single decoding will only return checkpoint record as string # convert string to dict using json chkpt_doc_string = repl_ckpt_list['/ckpt/%s/0' % repl_id].replace('"', '\"') chkpt_dict = json.loads(chkpt_doc_string) return chkpt_dict['checkpoints'][0] def get_repl_stat(self, repl_id, src_bkt="default", stat="data_replicated", timestamp=None): repl_id = repl_id.replace('/', '%2F') api = self.baseUrl + "pools/default/buckets/" + src_bkt + "/stats/replications%2F" \ + repl_id + "%2F" + stat if timestamp: api += "?haveTStamp=" + timestamp status, content, header = self._http_request(api) if not status: raise XDCRException("Unable to retrieve {0} stat for replication {1}". format(stat, repl_id)) repl_stat = json.loads(content) samples = [] for node in self.get_nodes(): items = repl_stat["nodeStats"]["{0}:8091".format(node.ip)] samples.append(items) return samples """ Start of FTS rest apis""" def set_fts_ram_quota(self, value): """set fts ram quota""" api = self.baseUrl + "pools/default" params = urllib.parse.urlencode({"ftsMemoryQuota": value}) status, content, _ = self._http_request(api, "POST", params) if status: log.info("SUCCESS: FTS RAM quota set to {0}mb".format(value)) else: raise Exception("Error setting fts ram quota: {0}".format(content)) return status def set_maxConcurrentPartitionMovesPerNode(self, value): api = self.fts_baseUrl + "api/managerOptions" params = {"maxConcurrentPartitionMovesPerNode": str(value)} status, content, _ = self._http_request(api, "PUT", params=json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers()) if status: log.info("SUCCESS: FTS maxConcurrentPartitionMovesPerNode set to {0}".format(value)) return status def set_disableFileTransferRebalance(self, value): api = self.fts_baseUrl + "api/managerOptions" params = 
{"disableFileTransferRebalance": str(value)} status, content, _ = self._http_request(api, "PUT", params=json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers()) if status: log.info("SUCCESS: FTS disableFileTransferRebalance set to {0}".format(value)) return status def set_maxFeedsPerDCPAgent(self, value): api = self.fts_baseUrl + "api/managerOptions" params = {"maxFeedsPerDCPAgent": str(value)} status, content, _ = self._http_request(api, "PUT", params=json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers()) if status: log.info("SUCCESS: FTS maxFeedsPerDCPAgent set to {0}".format(value)) return status def set_maxDCPAgents(self, value): api = self.fts_baseUrl + "api/managerOptions" params = {"maxDCPAgents": str(value)} status, content, _ = self._http_request(api, "PUT", params=json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers()) if status: log.info("SUCCESS: FTS maxDCPAgents set to {0}".format(value)) return status def create_fts_index(self, index_name, params): """create or edit fts index , returns {"status":"ok"} on success""" api = self.fts_baseUrl + "api/index/{0}".format(index_name) log.info(json.dumps(params)) status, content, header = self._http_request(api, 'PUT', json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers(), timeout=30) if status: log.info("Index {0} created".format(index_name)) else: raise Exception("Error creating index: {0}".format(content)) return status def update_fts_index(self, index_name, index_def): api = self.fts_baseUrl + "api/index/{0}".format(index_name) log.info(json.dumps(index_def, indent=3)) status, content, header = self._http_request(api, 'PUT', json.dumps(index_def, ensure_ascii=False), headers=self._create_capi_headers(), timeout=30) if status: log.info("Index/alias {0} updated".format(index_name)) else: raise Exception("Error updating index: {0}".format(content)) return status def get_fts_index_definition(self, name, timeout=30): """ get fts 
index/alias definition """ json_parsed = {} api = self.fts_baseUrl + "api/index/{0}".format(name) status, content, header = self._http_request( api, headers=self._create_capi_headers(), timeout=timeout) if status: json_parsed = json.loads(content) return status, json_parsed def get_fts_index_doc_count(self, name, timeout=30): """ get number of docs indexed""" json_parsed = {} api = self.fts_baseUrl + "api/index/{0}/count".format(name) status, content, header = self._http_request( api, headers=self._create_capi_headers(), timeout=timeout) if status: json_parsed = json.loads(content) return json_parsed['count'] def get_fts_index_uuid(self, name, timeout=30): """ Returns uuid of index/alias """ json_parsed = {} api = self.fts_baseUrl + "api/index/{0}".format(name) status, content, header = self._http_request( api, headers=self._create_capi_headers(), timeout=timeout) if status: json_parsed = json.loads(content) return json_parsed['indexDef']['uuid'] def get_fts_pindex_stats(self, timeout=30): """ Returns uuid of index/alias """ json_parsed = {} api = self.fts_baseUrl + "api/stats" status, content, header = self._http_request( api, headers=self._create_capi_headers(), timeout=timeout) if status: json_parsed = json.loads(content) return json_parsed['pindexes'] def delete_fts_index(self, name): """ delete fts index/alias """ api = self.fts_baseUrl + "api/index/{0}".format(name) status, content, header = self._http_request( api, 'DELETE', headers=self._create_capi_headers()) return status def delete_fts_index_extended_output(self, name): """ delete fts index/alias """ api = self.fts_baseUrl + "api/index/{0}".format(name) status, content, header = self._http_request( api, 'DELETE', headers=self._create_capi_headers()) return status, content, header def stop_fts_index_update(self, name): """ method to stop fts index from updating""" api = self.fts_baseUrl + "api/index/{0}/ingestControl/pause".format(name) log.info('calling api : {0}'.format(api)) status, content, header = 
self._http_request( api, 'POST', '', headers=self._create_capi_headers()) return status def resume_fts_index_update(self, name): """ method to stop fts index from updating""" api = self.fts_baseUrl + "api/index/{0}/ingestControl/resume".format(name) log.info('calling api : {0}'.format(api)) status, content, header = self._http_request( api, 'POST', '', headers=self._create_capi_headers()) return status def freeze_fts_index_partitions(self, name): """ method to freeze index partitions asignment""" api = self.fts_baseUrl+ "api/index/{0}/planFreezeControl/freeze".format(name) log.info('calling api : {0}'.format(api)) status, content, header = self._http_request( api, 'POST', '', headers=self._create_capi_headers()) return status def set_bleve_max_result_window(self, bmrw_value): """create or edit fts index , returns {"status":"ok"} on success""" api = self.fts_baseUrl + "api/managerOptions" params = {"bleveMaxResultWindow": str(bmrw_value)} log.info(json.dumps(params)) status, content, header = self._http_request(api, 'PUT', json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers(), timeout=30) if status: log.info("Updated bleveMaxResultWindow") else: raise Exception("Error Updating bleveMaxResultWindow: {0}".format(content)) return status def set_node_setting(self, setting_name, value): """create or edit fts index , returns {"status":"ok"} on success""" api = self.fts_baseUrl + "api/managerOptions" params = {str(setting_name): str(value)} log.info(json.dumps(params)) status, content, header = self._http_request(api, 'PUT', json.dumps(params, ensure_ascii=False), headers=self._create_capi_headers(), timeout=30) if status: log.info("Updated {0}".format(setting_name)) else: raise Exception("Error Updating {0}: {1}".format(setting_name, content)) return status def unfreeze_fts_index_partitions(self, name): """ method to freeze index partitions asignment""" api = self.fts_baseUrl+ "api/index/{0}/planFreezeControl/unfreeze".format(name) log.info('calling 
api : {0}'.format(api)) status, content, header = self._http_request( api, 'POST', '', headers=self._create_capi_headers()) return status def disable_querying_on_fts_index(self, name): """ method to disable querying on index""" api = self.fts_baseUrl + "api/index/{0}/queryControl/disallow".format(name) log.info('calling api : {0}'.format(api)) status, content, header = self._http_request( api, 'POST', '', headers=self._create_capi_headers()) return status def enable_querying_on_fts_index(self, name): """ method to enable querying on index""" api = self.fts_baseUrl + "api/index/{0}/queryControl/allow".format(name) log.info('calling api : {0}'.format(api)) status, content, header = self._http_request( api, 'POST', '', headers=self._create_capi_headers()) return status def run_fts_query(self, index_name, query_json, timeout=70): """Method run an FTS query through rest api""" api = self.fts_baseUrl + "api/index/{0}/query".format(index_name) headers = self._create_capi_headers() status, content, header = self._http_request( api, "POST", json.dumps(query_json, ensure_ascii=False).encode('utf8'), headers, timeout=timeout) content = json.loads(content) if status: return content['total_hits'], content['hits'], content['took'], \ content['status'] else: return -1, content['error'], -1, content['status'] def run_fts_query_generalized(self, index_name, query_json, timeout=70): """Method run an FTS query through rest api""" api = self.fts_baseUrl + "api/index/{0}/query".format(index_name) headers = self._create_capi_headers() status, content, header = self._http_request( api, "POST", json.dumps(query_json, ensure_ascii=False).encode('utf8'), headers, timeout=timeout) content = json.loads(content) return content def run_fts_query_with_facets(self, index_name, query_json): """Method run an FTS query through rest api""" api = self.fts_baseUrl + "api/index/{0}/query".format(index_name) headers = self._create_capi_headers() status, content, header = self._http_request( api, "POST", 
json.dumps(query_json, ensure_ascii=False).encode('utf8'), headers, timeout=70) if status: content = json.loads(content) return content['total_hits'], content['hits'], content['took'], \ content['status'], content['facets'] """ End of FTS rest APIs """ def set_reb_cons_view(self, disable): """Enable/disable consistent view for rebalance tasks""" api = self.baseUrl + "internalSettings" params = {"indexAwareRebalanceDisabled": str(disable).lower()} params = urllib.parse.urlencode(params) status, content, header = self._http_request(api, "POST", params) log.info('Consistent-views during rebalance was set as indexAwareRebalanceDisabled={0}'\ .format(str(disable).lower())) return status def set_reb_index_waiting(self, disable): """Enable/disable rebalance index waiting""" api = self.baseUrl + "internalSettings" params = {"rebalanceIndexWaitingDisabled": str(disable).lower()} params = urllib.parse.urlencode(params) status, content, header = self._http_request(api, "POST", params) log.info('rebalance index waiting was set as rebalanceIndexWaitingDisabled={0}'\ .format(str(disable).lower())) return status def set_rebalance_index_pausing(self, disable): """Enable/disable index pausing during rebalance""" api = self.baseUrl + "internalSettings" params = {"rebalanceIndexPausingDisabled": str(disable).lower()} params = urllib.parse.urlencode(params) status, content, header = self._http_request(api, "POST", params) log.info('index pausing during rebalance was set as rebalanceIndexPausingDisabled={0}'\ .format(str(disable).lower())) return status def set_max_parallel_indexers(self, count): """set max parallel indexer threads""" api = self.baseUrl + "internalSettings" params = {"maxParallelIndexers": count} params = urllib.parse.urlencode(params) status, content, header = self._http_request(api, "POST", params) log.info('max parallel indexer threads was set as maxParallelIndexers={0}'.\ format(count)) return status def set_max_parallel_replica_indexers(self, count): """set max 
parallel replica indexers threads""" api = self.baseUrl + "internalSettings" params = {"maxParallelReplicaIndexers": count} params = urllib.parse.urlencode(params) status, content, header = self._http_request(api, "POST", params) log.info('max parallel replica indexers threads was set as maxParallelReplicaIndexers={0}'.\ format(count)) return status def get_internal_replication_type(self): buckets = self.get_buckets() cmd = "\'{ok, BC} = ns_bucket:get_bucket(%s), ns_bucket:replication_type(BC).\'" % buckets[0].name return self.diag_eval(cmd) def set_mc_threads(self, mc_threads=4): """ Change number of memcached threads and restart the cluster """ cmd = "[ns_config:update_key({node, N, memcached}, " \ "fun (PList) -> lists:keystore(verbosity, 1, PList," \ " {verbosity, \"-t %s\"}) end) " \ "|| N <- ns_node_disco:nodes_wanted()]." % mc_threads return self.diag_eval(cmd) def get_auto_compaction_settings(self): api = self.baseUrl + "settings/autoCompaction" status, content, header = self._http_request(api) return json.loads(content) def set_auto_compaction(self, parallelDBAndVC="false", dbFragmentThreshold=None, viewFragmntThreshold=None, dbFragmentThresholdPercentage=None, viewFragmntThresholdPercentage=None, allowedTimePeriodFromHour=None, allowedTimePeriodFromMin=None, allowedTimePeriodToHour=None, allowedTimePeriodToMin=None, allowedTimePeriodAbort=None, bucket=None): """Reset compaction values to default, try with old fields (dp4 build) and then try with newer fields""" params = {} api = self.baseUrl if bucket is None: # setting is cluster wide api = api + "controller/setAutoCompaction" else: # overriding per/bucket compaction setting api = api + "pools/default/buckets/" + bucket params["autoCompactionDefined"] = "true" # reuse current ram quota in mb per node num_nodes = len(self.node_statuses()) bucket_info = self.get_bucket_json(bucket) quota = self.get_bucket_json(bucket)["quota"]["ram"] // (1048576 * num_nodes) params["ramQuotaMB"] = quota 
params["parallelDBAndViewCompaction"] = parallelDBAndVC # Need to verify None because the value could be = 0 if dbFragmentThreshold is not None: params["databaseFragmentationThreshold[size]"] = dbFragmentThreshold if viewFragmntThreshold is not None: params["viewFragmentationThreshold[size]"] = viewFragmntThreshold if dbFragmentThresholdPercentage is not None: params["databaseFragmentationThreshold[percentage]"] = dbFragmentThresholdPercentage if viewFragmntThresholdPercentage is not None: params["viewFragmentationThreshold[percentage]"] = viewFragmntThresholdPercentage if allowedTimePeriodFromHour is not None: params["allowedTimePeriod[fromHour]"] = allowedTimePeriodFromHour if allowedTimePeriodFromMin is not None: params["allowedTimePeriod[fromMinute]"] = allowedTimePeriodFromMin if allowedTimePeriodToHour is not None: params["allowedTimePeriod[toHour]"] = allowedTimePeriodToHour if allowedTimePeriodToMin is not None: params["allowedTimePeriod[toMinute]"] = allowedTimePeriodToMin if allowedTimePeriodAbort is not None: params["allowedTimePeriod[abortOutside]"] = allowedTimePeriodAbort params = urllib.parse.urlencode(params) log.info("'%s' bucket's settings will be changed with parameters: %s" % (bucket, params)) return self._http_request(api, "POST", params) def disable_auto_compaction(self): """ Cluster-wide Setting Disable autocompaction on doc and view """ api = self.baseUrl + "controller/setAutoCompaction" log.info("Disable autocompaction in cluster-wide setting") status, content, header = self._http_request(api, "POST", "parallelDBAndViewCompaction=false") return status def set_purge_interval_and_parallel_compaction(self, interval=3, parallel="false"): """ Cluster-wide setting. 
            Set purge interval
            Set parallel db and view compaction
            Return: status
        """
        api = self.baseUrl + "controller/setAutoCompaction"
        log.info("Set purgeInterval to %s and parallel DB and view compaction to %s"\
                 % (interval, parallel))
        params = {}
        # `interval` is the tombstone purge interval in days; `parallel` is the
        # string "true"/"false" expected by the REST endpoint.
        params["purgeInterval"] = interval
        params["parallelDBAndViewCompaction"] = parallel
        params = urllib.parse.urlencode(params)
        status, content, header = self._http_request(api, "POST", params)
        return status, content

    def set_indexer_compaction(self, mode="circular", indexDayOfWeek=None, indexFromHour=0,
                               indexFromMinute=0, abortOutside=False, indexToHour=0,
                               indexToMinute=0, fragmentation=30):
        """Reset compaction values to default, try with old fields (dp4 build)
           and then try with newer fields"""
        params = {}
        api = self.baseUrl + "controller/setAutoCompaction"
        params["indexCompactionMode"] = mode
        params["indexCircularCompaction[interval][fromHour]"] = indexFromHour
        params["indexCircularCompaction[interval][fromMinute]"] = indexFromMinute
        params["indexCircularCompaction[interval][toHour]"] = indexToHour
        params["indexCircularCompaction[interval][toMinute]"] = indexToMinute
        if indexDayOfWeek:
            params["indexCircularCompaction[daysOfWeek]"] = indexDayOfWeek
        # REST API wants a lowercase "true"/"false" string, not a Python bool.
        params["indexCircularCompaction[interval][abortOutside]"] = str(abortOutside).lower()
        params["parallelDBAndViewCompaction"] = "false"
        if mode == "full":
            # Fragmentation threshold only applies to full (non-circular) compaction.
            params["indexFragmentationThreshold[percentage]"] = fragmentation
        log.info("Indexer Compaction Settings: %s" % (params))
        params = urllib.parse.urlencode(params)
        return self._http_request(api, "POST", params)

    def set_global_loglevel(self, loglevel='error'):
        """Set cluster-wide logging level for core components

        Possible loglevel:
            -- debug
            -- info
            -- warn
            -- error
        """
        # Runs an Erlang snippet on every node via diag/eval to set ale log
        # levels for the listed ns_server subsystems.
        api = self.baseUrl + 'diag/eval'
        request_body = 'rpc:eval_everywhere(erlang, apply, [fun () -> \
                        [ale:set_loglevel(L, {0}) || L <- \
                        [ns_server, couchdb, user, menelaus, ns_doctor, stats, \
                        rebalance, cluster, views, stderr]] end, []]).'.format(loglevel)
        return self._http_request(api=api, method='POST', params=request_body,
                                  headers=self._create_headers())

    def set_indexer_params(self, parameter, val):
        """
        :Possible parameters:
            -- indexerThreads
            -- memorySnapshotInterval
            -- stableSnapshotInterval
            -- maxRollbackPoints
            -- logLevel
        """
        params = {}
        api = self.baseUrl + 'settings/indexes'
        params[parameter] = val
        params = urllib.parse.urlencode(params)
        status, content, header = self._http_request(api, "POST", params)
        log.info('Indexer {0} set to {1}'.format(parameter, val))
        return status

    def get_global_index_settings(self):
        # Return the parsed settings/indexes document, or None when the GET fails.
        api = self.baseUrl + "settings/indexes"
        status, content, header = self._http_request(api)
        if status:
            return json.loads(content)
        return None

    def set_couchdb_option(self, section, option, value):
        """Dynamic settings changes"""
        # Escaped braces ({{ }}) produce literal Erlang tuples in the command.
        cmd = 'ns_config:set({{couchdb, {{{0}, {1}}}}}, {2}).'.format(section,
                                                                      option,
                                                                      value)
        return self.diag_eval(cmd)

    def get_alerts(self):
        # Fetch pools/default and return its "alerts" list, if present.
        # NOTE(review): None is returned explicitly only when the HTTP call
        # fails; a successful response without an "alerts" key falls through
        # and returns None implicitly — confirm this asymmetry is intended.
        api = self.baseUrl + "pools/default/"
        status, content, header = self._http_request(api)
        json_parsed = json.loads(content)
        if status:
            if "alerts" in json_parsed:
                return json_parsed['alerts']
        else:
            return None

    def get_nodes_data_from_cluster(self, param="nodes"):
        # Same shape as get_alerts(): return pools/default[param], else None.
        api = self.baseUrl + "pools/default/"
        status, content, header = self._http_request(api)
        json_parsed = json.loads(content)
        if status:
            if param in json_parsed:
                return json_parsed[param]
        else:
            return None

    def flush_bucket(self, bucket="default"):
        # Accept either a Bucket object or a plain bucket-name string.
        if isinstance(bucket, Bucket):
            bucket_name = bucket.name
        else:
            bucket_name = bucket
        api = self.baseUrl + "pools/default/buckets/%s/controller/doFlush" % (bucket_name)
        status, content, header = self._http_request(api, 'POST')
        if not status:
            raise BucketFlushFailed(self.ip, bucket_name)
        log.info("Flush for bucket '%s' was triggered" % bucket_name)
        return True

    def update_notifications(self, enable):
        # Toggle anonymous usage-statistics reporting (settings/stats).
        api = self.baseUrl + 'settings/stats'
        params = urllib.parse.urlencode({'sendStats' : enable})
        log.info('settings/stats params : {0}'.format(params))
        status, content, header = self._http_request(api, 'POST', params)
        return status

    def get_notifications(self):
        # Return the current sendStats flag, or None when the GET fails.
        api = self.baseUrl + 'settings/stats'
        status, content, header = self._http_request(api)
        json_parsed = json.loads(content)
        if status:
            return json_parsed["sendStats"]
        return None

    def get_num_rollback_stat(self, bucket):
        # Indexer stat: number of rollbacks on the bucket's MAINT_STREAM.
        api = self.index_baseUrl + 'stats'
        status, content, header = self._http_request(api)
        json_parsed = json.loads(content)
        num_rollback = json_parsed["MAINT_STREAM:{}:num_rollbacks".format(bucket)]
        return num_rollback

    def get_num_rollback_to_zero_stat(self, bucket):
        # Indexer stat: number of rollbacks-to-zero on the bucket's MAINT_STREAM.
        api = self.index_baseUrl + 'stats'
        status, content, header = self._http_request(api)
        json_parsed = json.loads(content)
        num_rollback = json_parsed["MAINT_STREAM:{}:num_rollbacks_to_zero".format(bucket)]
        return num_rollback

    def get_logs(self, last_n=10, contains_text=None):
        # Return up to last_n most-recent UI log entries (newest first).
        # When contains_text is given, stop after the first matching entry
        # (that entry is included in the result).
        api = self.baseUrl + 'logs'
        status, content, header = self._http_request(api)
        json_parsed = json.loads(content.decode("utf-8","ignore"))
        logs = json_parsed['list']
        logs.reverse()
        result = []
        for i in range(min(last_n, len(logs))):
            result.append(logs[i])
            if contains_text is not None and contains_text in logs[i]["text"]:
                break
        return result

    def print_UI_logs(self, last_n=10, contains_text=None):
        # Dump recent UI log entries to the test log (at error level).
        logs = self.get_logs(last_n, contains_text)
        log.info("Latest logs from UI on {0}:".format(self.ip))
        for lg in logs:
            log.error(lg)

    def get_ro_user(self):
        # Return (content, status) for the configured read-only admin name.
        api = self.baseUrl + 'settings/readOnlyAdminName'
        status, content, header = self._http_request(api, 'GET', '')
        return content, status

    def delete_ro_user(self):
        api = self.baseUrl + 'settings/readOnlyUser'
        status, content, header = self._http_request(api, 'DELETE', '')
        return status

    def create_ro_user(self, username, password):
        api = self.baseUrl + 'settings/readOnlyUser'
        params = urllib.parse.urlencode({'username' : username, 'password' : password})
        log.info('settings/readOnlyUser params : {0}'.format(params))
        status, content, header = self._http_request(api, 'POST', params)
        return status

    #
Change password for readonly user def changePass_ro_user(self, username, password): api = self.baseUrl + 'settings/readOnlyUser' params = urllib.parse.urlencode({'username' : username, 'password' : password}) log.info('settings/readOnlyUser params : {0}'.format(params)) status, content, header = self._http_request(api, 'PUT', params) return status '''Start Monitoring/Profiling Rest Calls''' def set_completed_requests_collection_duration(self, server, min_time): http = httplib2.Http(disable_ssl_certificate_validation=True) n1ql_port = CbServer.n1ql_port protocol = "http" if CbServer.use_https: n1ql_port = str(CbServer.ssl_port_map.get(str(n1ql_port), str(n1ql_port))) protocol = "https" api = "%s://%s:%s/" % (protocol,server.ip, n1ql_port) + "admin/settings" body = {"completed-threshold": min_time} headers = self._create_headers_with_auth('Administrator', 'password') response, content = http.request(api, "POST", headers=headers, body=json.dumps(body)) return response, content def set_completed_requests_max_entries(self, server, no_entries): http = httplib2.Http(disable_ssl_certificate_validation=True) n1ql_port = CbServer.n1ql_port protocol = "http" if CbServer.use_https: n1ql_port = str(CbServer.ssl_port_map.get(str(n1ql_port), str(n1ql_port))) protocol = "https" api = "%s://%s:%s/" % (protocol, server.ip, n1ql_port) + "admin/settings" body = {"completed-limit": no_entries} headers = self._create_headers_with_auth('Administrator', 'password') response, content = http.request(api, "POST", headers=headers, body=json.dumps(body)) return response, content def set_profiling(self, server, setting): http = httplib2.Http(disable_ssl_certificate_validation=True) n1ql_port = CbServer.n1ql_port protocol = "http" if CbServer.use_https: n1ql_port = str(CbServer.ssl_port_map.get(str(n1ql_port), str(n1ql_port))) protocol = "https" api = "%s://%s:%s/" % (protocol, server.ip, n1ql_port) + "admin/settings" body = {"profile": setting} headers = 
self._create_headers_with_auth('Administrator', 'password') response, content = http.request(api, "POST", headers=headers, body=json.dumps(body)) return response, content def set_query_servicers(self, server, setting, servicers="servicers"): http = httplib2.Http(disable_ssl_certificate_validation=True) n1ql_port = CbServer.n1ql_port protocol = "http" if CbServer.use_https: n1ql_port = str(CbServer.ssl_port_map.get(str(n1ql_port), str(n1ql_port))) protocol = "https" api = "%s://%s:%s/" % (protocol, server.ip, n1ql_port) + "admin/settings" body = {servicers: setting} headers = self._create_headers_with_auth('Administrator', 'password') response, content = http.request(api, "POST", headers=headers, body=json.dumps(body)) return response, content def set_profiling_controls(self, server, setting): http = httplib2.Http(disable_ssl_certificate_validation=True) n1ql_port = CbServer.n1ql_port protocol = "http" if CbServer.use_https: n1ql_port = str(CbServer.ssl_port_map.get(str(n1ql_port), str(n1ql_port))) protocol = "https" api = "%s://%s:%s/" % (protocol, server.ip, n1ql_port) + "admin/settings" body = {"controls": setting} headers = self._create_headers_with_auth('Administrator', 'password') response, content = http.request(api, "POST", headers=headers, body=json.dumps(body)) return response, content def get_query_admin_settings(self, server): http = httplib2.Http(disable_ssl_certificate_validation=True) n1ql_port = CbServer.n1ql_port protocol = "http" if CbServer.use_https: n1ql_port = str(CbServer.ssl_port_map.get(str(n1ql_port), str(n1ql_port))) protocol = "https" api = "%s://%s:%s/" % (protocol, server.ip, n1ql_port) + "admin/settings" headers = self._create_headers_with_auth('Administrator', 'password') response, content = http.request(api, "GET", headers=headers) result = json.loads(content) return result def get_query_vitals(self, server): http = httplib2.Http(disable_ssl_certificate_validation=True) n1ql_port = CbServer.n1ql_port protocol = "http" if 
CbServer.use_https: n1ql_port = str(CbServer.ssl_port_map.get(str(n1ql_port), str(n1ql_port))) protocol = "https" api = "%s://%s:%s/" % (protocol,server.ip, n1ql_port) + "admin/vitals" headers = self._create_headers_with_auth('Administrator', 'password') response, content = http.request(api, "GET", headers=headers) return response, content '''End Monitoring/Profiling Rest Calls''' def create_whitelist(self, server, whitelist): http = httplib2.Http(disable_ssl_certificate_validation=True) protocol = "http" if CbServer.use_https: protocol = "https" api = "%s://%s:%s/" % (protocol, server.ip, server.port) + "settings/querySettings/curlWhitelist" headers = self._create_headers_with_auth('Administrator', 'password') response, content = http.request(api, "POST", headers=headers, body=json.dumps(whitelist)) return response, content def query_tool(self, query, port=8093, timeout=1300, query_params={}, is_prepared=False, named_prepare=None, verbose = True, encoded_plan=None, servers=None): if timeout is None: timeout = 1300 protocol = "http" if CbServer.use_https: port = str(CbServer.ssl_port_map.get(str(port), str(port))) protocol = "https" key = 'prepared' if is_prepared else 'statement' headers = None prepared = json.dumps(query) if is_prepared: if named_prepare and encoded_plan: http = httplib2.Http(disable_ssl_certificate_validation=True) if len(servers)>1: url = "%s://%s:%s/query/service" % (protocol, servers[1].ip, port) else: url = "%s://%s:%s/query/service" % (protocol, self.ip, port) headers = self._create_headers_encoded_prepared() body = {'prepared': named_prepare, 'encoded_plan':encoded_plan} response, content = http.request(url, 'POST', headers=headers, body=json.dumps(body)) return eval(content) elif named_prepare and not encoded_plan: params = 'prepared=' + urllib.parse.quote(prepared, '~()') params = 'prepared="%s"'% named_prepare else: if isinstance(query, dict): prepared = json.dumps(query['name']) else: prepared = json.dumps(query) prepared = 
str(prepared) params = 'prepared=' + urllib.parse.quote(prepared, '~()') if 'creds' in query_params and query_params['creds']: headers = self._create_headers_with_auth(query_params['creds'][0]['user'], query_params['creds'][0]['pass']) api = "%s://%s:%s/query/service?%s" % (protocol, self.ip, port, params) log.info("%s"%api) else: params = {key : query} try: if 'creds' in query_params and query_params['creds']: headers = self._create_headers_with_auth(query_params['creds'][0]['user'], query_params['creds'][0]['pass']) del query_params['creds'] except Exception: traceback.print_exc() params.update(query_params) params = urllib.parse.urlencode(params) if verbose: log.info('query params : {0}'.format(params)) api = "%s://%s:%s/query?%s" % (protocol, self.ip, port, params) if 'query_context' in query_params and query_params['query_context']: log.info(f"Running Query with query_context: {query_params['query_context']}") try: status, content, header = self._http_request(api, 'POST', timeout=timeout, headers=headers) except Exception as ex: print("\nException error: ", str(ex)) print("\napi: ", api) print("\nheaders: ", headers) try: return json.loads(content) except ValueError: return content def analytics_tool(self, query, port=8095, timeout=650, query_params={}, is_prepared=False, named_prepare=None, verbose = True, encoded_plan=None, servers=None): protocol = "http" if CbServer.use_https: port = str(CbServer.ssl_port_map.get(str(port), str(port))) protocol = "https" key = 'prepared' if is_prepared else 'statement' headers = None content="" prepared = json.dumps(query) if is_prepared: if named_prepare and encoded_plan: http = httplib2.Http(disable_ssl_certificate_validation=True) if len(servers)>1: url = "%s://%s:%s/query/service" % (protocol, servers[1].ip, port) else: url = "%s://%s:%s/query/service" % (protocol, self.ip, port) headers = {'Content-type': 'application/json'} body = {'prepared': named_prepare, 'encoded_plan':encoded_plan} response, content = 
http.request(url, 'POST', headers=headers, body=json.dumps(body)) return eval(content) elif named_prepare and not encoded_plan: params = 'prepared=' + urllib.parse.quote(prepared, '~()') params = 'prepared="%s"'% named_prepare else: prepared = json.dumps(query) prepared = str(prepared.encode('utf-8')) params = 'prepared=' + urllib.parse.quote(prepared, '~()') if 'creds' in query_params and query_params['creds']: headers = self._create_headers_with_auth(query_params['creds'][0]['user'], query_params['creds'][0]['pass']) api = "%s/analytics/service?%s" % (self.cbas_base_url, params) log.info("%s"%api) else: params = {key : query} if 'creds' in query_params and query_params['creds']: headers = self._create_headers_with_auth(query_params['creds'][0]['user'], query_params['creds'][0]['pass']) del query_params['creds'] params.update(query_params) params = urllib.parse.urlencode(params) if verbose: log.info('query params : {0}'.format(params)) api = "%s/analytics/service?%s" % (self.cbas_base_url, params) status, content, header = self._http_request(api, 'POST', timeout=timeout, headers=headers) try: return json.loads(content) except ValueError: return content def query_tool_stats(self, server): n1ql_port = CbServer.n1ql_port protocol = "http" if CbServer.use_https: n1ql_port = CbServer.ssl_n1ql_port protocol = "https" log.info('query n1ql stats') api = "%s://%s:%s/admin/stats" % (protocol, server.ip, str(n1ql_port)) status, content, header = self._http_request(api, 'GET') log.info(content) try: return json.loads(content) except ValueError: return content def index_tool_stats(self, show_index_stats=True): log.info('index n1ql stats') port = CbServer.port protocol = "http" if CbServer.use_https: port = CbServer.ssl_port protocol = "https" api = "%s://%s:%s/indexStatus" % (protocol, self.ip, port) params = "" status, content, header = self._http_request(api, 'GET', params) if show_index_stats: log.info(content) try: return json.loads(content) except ValueError: return 
content # return all rack/zone info def get_all_zones_info(self, timeout=120): zones = {} api = self.baseUrl + 'pools/default/serverGroups' status, content, header = self._http_request(api, timeout=timeout) if status: zones = json.loads(content) else: raise Exception("Failed to get all zones info.\n \ Zone only supports from couchbase server version 2.5 and up.") return zones # return group name and unique uuid def get_zone_names(self): zone_names = {} zone_info = self.get_all_zones_info() if zone_info and len(zone_info["groups"]) >= 1: for i in range(0, len(zone_info["groups"])): # pools/default/serverGroups/ = 27 chars zone_names[zone_info["groups"][i]["name"]] = zone_info["groups"][i]["uri"][28:] return zone_names def add_zone(self, zone_name): api = self.baseUrl + 'pools/default/serverGroups' request_name = "name={0}".format(zone_name) status, content, header = self._http_request(api, "POST", \ params=request_name) if status: log.info("zone {0} is added".format(zone_name)) return True else: raise Exception("Failed to add zone with name: %s " % zone_name) def delete_zone(self, zone_name): api = self.baseUrl + 'pools/default/serverGroups/' # check if zone exist found = False zones = self.get_zone_names() for zone in zones: if zone_name == zone: api += zones[zone_name] found = True break if not found: raise Exception("There is not zone with name: %s in cluster" % zone_name) status, content, header = self._http_request(api, "DELETE") if status: log.info("zone {0} is deleted".format(zone_name)) else: raise Exception("Failed to delete zone with name: %s " % zone_name) def rename_zone(self, old_name, new_name): api = self.baseUrl + 'pools/default/serverGroups/' # check if zone exist found = False zones = self.get_zone_names() for zone in zones: if old_name == zone: api += zones[old_name] request_name = "name={0}".format(new_name) found = True break if not found: raise Exception("There is not zone with name: %s in cluster" % old_name) status, content, header = 
self._http_request(api, "PUT", params=request_name) if status: log.info("zone {0} is renamed to {1}".format(old_name, new_name)) else: raise Exception("Failed to rename zone with name: %s " % old_name) # get all nodes info in one zone/rack/group def get_nodes_in_zone(self, zone_name): nodes = {} tmp = {} zone_info = self.get_all_zones_info() if zone_name != "": found = False if len(zone_info["groups"]) >= 1: for i in range(0, len(zone_info["groups"])): if zone_info["groups"][i]["name"] == zone_name: tmp = zone_info["groups"][i]["nodes"] if not tmp: log.info("zone {0} is existed but no node in it".format(zone_name)) # remove port for node in tmp: node["hostname"] = node["hostname"].split(":") node["hostname"] = node["hostname"][0] nodes[node["hostname"]] = node found = True break if not found: raise Exception("There is not zone with name: %s in cluster" % zone_name) return nodes def get_zone_and_nodes(self): """ only return zones with node in its """ zones = {} tmp = {} zone_info = self.get_all_zones_info() if len(zone_info["groups"]) >= 1: for i in range(0, len(zone_info["groups"])): tmp = zone_info["groups"][i]["nodes"] if not tmp: log.info("zone {0} is existed but no node in it".format(tmp)) # remove port else: nodes = [] for node in tmp: node["hostname"] = node["hostname"].split(":") node["hostname"] = node["hostname"][0] print(node["hostname"][0]) nodes.append(node["hostname"]) zones[zone_info["groups"][i]["name"]] = nodes return zones def get_zone_uri(self): zone_uri = {} zone_info = self.get_all_zones_info() if zone_info and len(zone_info["groups"]) >= 1: for i in range(0, len(zone_info["groups"])): zone_uri[zone_info["groups"][i]["name"]] = zone_info["groups"][i]["uri"] return zone_uri def shuffle_nodes_in_zones(self, moved_nodes, source_zone, target_zone): # moved_nodes should be a IP list like # ["192.168.171.144", "192.168.171.145"] request = "" for i in range(0, len(moved_nodes)): moved_nodes[i] = "ns_1@" + moved_nodes[i] all_zones = 
self.get_all_zones_info() api = self.baseUrl + all_zones["uri"][1:] moved_node_json = [] for i in range(0, len(all_zones["groups"])): for node in all_zones["groups"][i]["nodes"]: if all_zones["groups"][i]["name"] == source_zone: for n in moved_nodes: if n == node["otpNode"]: moved_node_json.append({"otpNode": node["otpNode"]}) zone_json = {} group_json = [] for i in range(0, len(all_zones["groups"])): node_j = [] zone_json["uri"] = all_zones["groups"][i]["uri"] zone_json["name"] = all_zones["groups"][i]["name"] zone_json["nodes"] = node_j if not all_zones["groups"][i]["nodes"]: if all_zones["groups"][i]["name"] == target_zone: for i in range(0, len(moved_node_json)): zone_json["nodes"].append(moved_node_json[i]) else: zone_json["nodes"] = [] else: for node in all_zones["groups"][i]["nodes"]: if all_zones["groups"][i]["name"] == source_zone and \ node["otpNode"] in moved_nodes: pass else: node_j.append({"otpNode": node["otpNode"]}) if all_zones["groups"][i]["name"] == target_zone: for k in range(0, len(moved_node_json)): node_j.append(moved_node_json[k]) zone_json["nodes"] = node_j group_json.append({"name": zone_json["name"], "uri": zone_json["uri"], "nodes": zone_json["nodes"]}) request = '{{"groups": {0} }}'.format(json.dumps(group_json)) status, content, header = self._http_request(api, "PUT", params=request) # sample request format # request = ' {"groups":[{"uri":"/pools/default/serverGroups/0","nodes": [] },\ # {"uri":"/pools/default/serverGroups/c8275b7a88e6745c02815dde4a505e70","nodes": [] },\ # {"uri":"/pools/default/serverGroups/1acd9810a027068bd14a1ddd43db414f","nodes": \ # [{"otpNode":"ns_1@192.168.171.144"},{"otpNode":"ns_1@192.168.171.145"}]} ]} ' return status def is_zone_exist(self, zone_name): found = False zones = self.get_zone_names() if zones: for zone in zones: if zone_name == zone: found = True return True break if not found: log.error("There is not zone with name: {0} in cluster.".format(zone_name)) return False def get_items_info(self, keys, 
bucket='default'): items_info = {} for key in keys: api = '{0}{1}{2}/docs/{3}'.format(self.baseUrl, 'pools/default/buckets/', bucket, key) status, content, header = self._http_request(api) if status: items_info[key] = json.loads(content) return items_info def start_cluster_logs_collection(self, nodes="*", upload=False, \ uploadHost=None, customer="", ticket=""): if not upload: params = urllib.parse.urlencode({"nodes":nodes}) else: params = urllib.parse.urlencode({"nodes":nodes, "uploadHost":uploadHost, \ "customer":customer, "ticket":ticket}) api = self.baseUrl + "controller/startLogsCollection" status, content, header = self._http_request(api, "POST", params) return status, content def get_cluster_logs_collection_info(self): api = self.baseUrl + "pools/default/tasks/" status, content, header = self._http_request(api, "GET") if status: tmp = json.loads(content) for k in tmp: if k["type"] == "clusterLogsCollection": content = k return content return None """ result["progress"]: progress logs collected at cluster level result["status]: status logs collected at cluster level result["perNode"]: all information logs collected at each node """ def get_cluster_logs_collection_status(self): result = self.get_cluster_logs_collection_info() if result: return result["progress"], result["status"], result["perNode"] return None, None, None def cancel_cluster_logs_collection(self): api = self.baseUrl + "controller/cancelLogsCollection" status, content, header = self._http_request(api, "POST") return status, content def set_log_redaction_level(self, redaction_level="none"): api = self.baseUrl + "settings/logRedaction" params = urllib.parse.urlencode({"logRedactionLevel":redaction_level}) status, content, header = self._http_request(api, "POST", params) if status: result = json.loads(content) if result["logRedactionLevel"] == redaction_level: return True else: return False return False def get_bucket_CCCP(self, bucket): log.info("Getting CCCP config ") api = '%spools/default/b/%s' 
% (self.baseUrl, bucket) if isinstance(bucket, Bucket): api = '%spools/default/b/%s' % (self.baseUrl, bucket.name) status, content, header = self._http_request(api) if status: return json.loads(content) return None def get_recovery_task(self): content = self.ns_server_tasks() for item in content: if item["type"] == "recovery": return item return None def get_recovery_progress(self, recoveryStatusURI): api = '%s%s' % (self.baseUrl, recoveryStatusURI) status, content, header = self._http_request(api) if status: return json.loads(content) return None def get_warming_up_tasks(self): tasks = self.ns_server_tasks() tasks_warmup = [] for task in tasks: if task["type"] == "warming_up": tasks_warmup.append(task) return tasks_warmup def compact_bucket(self, bucket="default"): api = self.baseUrl + 'pools/default/buckets/{0}/controller/compactBucket'.format(bucket) status, content, header = self._http_request(api, 'POST') if status: log.info('bucket compaction successful') else: raise BucketCompactionException(bucket) return True def cancel_bucket_compaction(self, bucket="default"): api = self.baseUrl + 'pools/default/buckets/{0}/controller/cancelBucketCompaction'.format(bucket) if isinstance(bucket, Bucket): api = self.baseUrl + 'pools/default/buckets/{0}/controller/cancelBucketCompaction'.format(bucket.name) status, content, header = self._http_request(api, 'POST') log.info("Status is {0}".format(status)) if status: log.info('Cancel bucket compaction successful') else: raise BucketCompactionException(bucket) return True def set_bucket_compressionMode(self, bucket="default", mode="passive"): api = self.baseUrl + "pools/default/buckets/" + bucket body = {'compressionMode': mode} params = urllib.parse.urlencode(body) headers = self._create_headers() status, content, header = self._http_request(api, 'POST', params=params, headers=headers) log.info("{0} with params: {1}".format(api, params)) if not status: raise Exception("Unable to set compressionMode {0} for bucket 
{1}".format(mode, bucket)) '''LDAP Rest API ''' ''' clearLDAPSettings - Function to clear LDAP settings Parameter - None Returns - status of LDAPAuth clear command ''' def clearLDAPSettings(self): api = self.baseUrl + 'settings/saslauthdAuth' params = urllib.parse.urlencode({'enabled':'false'}) status, content, header = self._http_request(api, 'POST', params) return status, content, header ''' ldapUserRestOperation - Execute LDAP REST API Input Parameter - authOperation - this is for auth need to be enabled or disabled - True or 0 currAdmmins - a list of username to add to full admin matching with ldap currROAdmins - a list of username to add to RO Admin Returns - status, content and header for the command executed ''' def ldapUserRestOperation(self, authOperation, adminUser='', ROadminUser=''): authOperation = authOperation currAdmins = '' currROAdmins = '' if (adminUser != ''): for user in adminUser: currAdmins = user[0] + "\n\r" + currAdmins if (ROadminUser != ''): for user in ROadminUser: currROAdmins = user[0] + "\n\r" + currROAdmins content = self.executeLDAPCommand(authOperation, currAdmins, currROAdmins) '''LDAP Rest API ''' ''' clearLDAPSettings - Function to clear LDAP settings Parameter - None Returns - status of LDAPAuth clear command ''' def clearLDAPSettings (self): api = self.baseUrl + 'settings/saslauthdAuth' params = urllib.parse.urlencode({'enabled':'false'}) status, content, header = self._http_request(api, 'POST', params) return status, content, header ''' ldapUserRestOperation - Execute LDAP REST API Input Parameter - authOperation - this is for auth need to be enabled or disabled - True or 0 currAdmmins - a list of username to add to full admin matching with ldap currROAdmins - a list of username to add to RO Admin Returns - status, content and header for the command executed ''' def ldapUserRestOperation(self, authOperation, adminUser='', ROadminUser='', exclude=None): if (authOperation): authOperation = 'true' else: authOperation = 'false' 
        # Build "\n\r"-separated user lists expected by the saslauthd endpoint.
        currAdmins = ''
        currROAdmins = ''
        if (adminUser != ''):
            for user in adminUser:
                currAdmins = user[0] + "\n\r" + currAdmins
        if (ROadminUser != ''):
            for user in ROadminUser:
                currROAdmins = user[0] + "\n\r" + currROAdmins
        # NOTE(review): this is the second definition of ldapUserRestOperation
        # in the class (an earlier one appears above without `exclude`); this
        # later definition shadows the first. Result of executeLDAPCommand is
        # assigned but not returned — confirm callers rely only on the side
        # effect.
        content = self.executeLDAPCommand(authOperation, currAdmins, currROAdmins, exclude)

    ''' executeLDAPCommand - Execute LDAP REST API
    Input Parameter -
        authOperation - this is for auth need to be enabled or disabled - True or 0
        currAdmmins - a list of username to add to full admin matching with ldap
        currROAdmins - a list of username to add to RO Admin
    Returns -
        status, content and header for the command executed '''
    def executeLDAPCommand(self, authOperation, currAdmins, currROAdmins, exclude=None):
        # exclude selects which admin list to OMIT from the POST:
        #   None        -> send both admins and roAdmins
        #   'fullAdmin' -> send only roAdmins
        #   otherwise   -> send only admins
        api = self.baseUrl + "settings/saslauthdAuth"
        if (exclude is None):
            log.info ("into exclude is None")
            params = urllib.parse.urlencode({
                                             'enabled': authOperation,
                                             'admins': '{0}'.format(currAdmins),
                                             'roAdmins': '{0}'.format(currROAdmins),
                                             })
        else:
            log.info ("Into exclude for value of fullAdmin {0}".format(exclude))
            if (exclude == 'fullAdmin'):
                params = urllib.parse.urlencode({
                                                 'enabled': authOperation,
                                                 'roAdmins': '{0}'.format(currROAdmins),
                                                 })
            else:
                # NOTE(review): log message duplicated from the branch above
                # (copy-paste); here `exclude` is NOT 'fullAdmin'.
                log.info ("Into exclude for value of fullAdmin {0}".format(exclude))
                params = urllib.parse.urlencode({
                                                 'enabled': authOperation,
                                                 'admins': '{0}'.format(currAdmins),
                                                 })
        status, content, header = self._http_request(api, 'POST', params)
        return content

    ''' validateLogin - Validate if user can login using a REST API
    Input Parameter - user and password to check for login.
Also take a boolean to decide if the status should be 200 or 400 and everything else should be false Returns - True of false based if user should login or login fail ''' def validateLogin(self, user, password, login, getContent=False): api = self.baseUrl + "uilogin" header = {'Content-type': 'application/x-www-form-urlencoded'} params = urllib.parse.urlencode({'user':'{0}'.format(user), 'password':'{0}'.format(password)}) log.info ("value of param is {0}".format(params)) http = httplib2.Http() status, content = http.request(api, 'POST', headers=header, body=params) log.info ("Status of login command - {0}".format(status)) if (getContent): return status, content if ((status['status'] == "200" and login == True) or (status ['status'] == "400" and login == False)): return True else: return False ''' ldapRestOperationGet - Get setting of LDAPAuth - Settings Returns - list of Admins, ROAdmins and is LDAPAuth enabled or not ''' def ldapRestOperationGetResponse(self): log.info ("GET command for LDAP Auth") api = self.baseUrl + "settings/saslauthdAuth" status, content, header = self._http_request(api, 'GET') return json.loads(content) ''' executeValidateCredentials - API to check credentials of users Input - user and password that needs validation Returns - [role]:<currentrole> [source]:<saslauthd,builtin> ''' def executeValidateCredentials(self, user, password): api = self.baseUrl + "validateCredentials" params = urllib.parse.urlencode({ 'user':'{0}'.format(user), 'password':'{0}'.format(password) }) status, content, header = self._http_request(api, 'POST', params) log.info ("Status of executeValidateCredentials command - {0}".format(status)) return status, json.loads(content) '''MadHatter LDAP Group Support''' ''' Assign group roles ''' def add_group_role(self,group_name,description,roles,ldap_group_ref=None): api = self.baseUrl + "/settings/rbac/groups/" + group_name if ldap_group_ref is not None: params = urllib.parse.urlencode({ 
'description':'{0}'.format(description), 'roles':'{0}'.format(roles), 'ldap_group_ref':'{0}'.format(ldap_group_ref) }) else: params = urllib.parse.urlencode({ 'description':'{0}'.format(description), 'roles':'{0}'.format(roles) }) status, content, header = self._http_request(api, 'PUT', params) log.info ("Status of Adding role to group command is {0}".format(status)) return status, json.loads(content) def delete_group(self,group_name): api = self.baseUrl + "/settings/rbac/groups/" + group_name status, content, header = self._http_request(api, 'DELETE') log.info ("Status of Delete role from CB is {0}".format(status)) return status, json.loads(content) def get_group_list(self): api = self.baseUrl + "/settings/rbac/groups/" status, content, header = self._http_request(api, 'GET') return status, json.loads(content) def get_group_details(self, group_name): api = self.baseUrl + "/settings/rbac/groups/" + group_name status, content, header = self._http_request(api, 'GET') return status, json.loads(content) def add_user_group(self,group_name,user_name): api = self.baseUrl + "/settings/rbac/users/local/" + user_name params = urllib.parse.urlencode({ 'groups':'{0}'.format(group_name) }) status, content, header = self._http_request(api, 'PUT', params) log.info ("Status of Adding role to group command is {0}".format(status)) return status, json.loads(content) def get_user_group(self,user_name): api = self.baseUrl + "/settings/rbac/users/local/" + user_name status, content, header = self._http_request(api, 'GET') log.info ("Status of Adding role to group command is {0}".format(status)) return status, json.loads(content) def grp_invalidate_cache(self): api = self.baseUrl + "/settings/invalidateLDAPCache/" status, content, header = self._http_request(api, 'POST') log.info("Status of Adding role to group command is {0}".format(status)) return status, json.loads(content) def invalidate_ldap_cache(self): api = self.baseUrl + '/settings/invalidateLDAPCache' status, content, header = 
self._http_request(api, 'POST') log.info("Status of Invalidate LDAP Cached is {0}".format(status)) return status, json.loads(content) def ldap_validate_conn(self): api = self.baseUrl + "/settings/ldap/validate/connectivity" status, content, header = self._http_request(api, 'POST') log.info("Status of Adding role to group command is {0}".format(status)) return status, json.loads(content) def ldap_validate_authen(self, user_name, password='password'): api = self.baseUrl + "/settings/ldap/validate/authentication" params = urllib.parse.urlencode({ 'auth_user': '{0}'.format(user_name), 'auth_pass': '{0}'.format(password) }) status, content, header = self._http_request(api, 'POST', params) log.info("Status of Adding role to group command is {0}".format(status)) return status, json.loads(content) def ldap_validate_grp_query(self, user): api = self.baseUrl + "/settings/ldap/validate/groups_query" params = urllib.parse.urlencode({ 'groups_query_user':'{0}'.format(user) }) status, content, header = self._http_request(api, 'POST',params) log.info ("Status of Adding role to group command is {0}".format(status)) return status, json.loads(content) def setup_ldap(self, data, extraparam): api = self.baseUrl + '/settings/ldap/' params = urllib.parse.urlencode(data) params = params + "&" + extraparam status, content, header = self._http_request(api, 'POST',params) log.info ("Status of Setting up LDAP command is {0}".format(status)) return status, json.loads(content) ''' Audit Commands ''' ''' getAuditSettings - API returns audit settings for Audit Input - None Returns - [archive_path]:<path for archieve> [auditd_enabled]:<enabled disabled status for auditd> [log_path]:<path for logs> [rotate_interval]:<log rotate interval> ''' def getAuditSettings(self): api = self.baseUrl + "settings/audit" status, content, header = self._http_request(api, 'GET') return json.loads(content) ''' getAuditSettings - API returns audit settings for Audit Input - [archive_path]:<path for archieve> 
[auditd_enabled]:<enabled disabled status for auditd> [rotate_interval]:<log rotate interval in seconds> ''' def setAuditSettings(self, enabled='true', rotateInterval=86400, logPath='/opt/couchbase/var/lib/couchbase/logs', services_to_disable=None): api = self.baseUrl + "settings/audit" params = {'rotateInterval':'{0}'.format(rotateInterval), 'auditdEnabled':'{0}'.format(enabled), 'logPath':'{0}'.format(logPath)} if services_to_disable: params['disabled'] = ",".join(services_to_disable) params = urllib.parse.urlencode(params) status, content, header = self._http_request(api, 'POST', params) log.info ("Value os status is {0}".format(status)) log.info ("Value of content is {0}".format(content)) if status: return status else: return status, json.loads(content) def get_audit_descriptors(self): api = self.baseUrl + "/settings/audit/descriptors" status, content, header = self._http_request(api, 'GET', headers=self._create_capi_headers()) return json.loads(content) if status else None def _set_secrets_password(self, new_password): api = self.baseUrl + "/node/controller/changeMasterPassword" params = urllib.parse.urlencode({ 'newPassword': '{0}'.format(new_password.encode('utf-8').strip()) }) log.info("Params getting set is ---- {0}".format(params)) params = params.replace('%24', '$') params = params.replace('%3D', '=') log.info("Params getting set is ---- {0}".format(params)) status, content, header = self._http_request(api, 'POST', params) log.info("Status of set password command - {0}".format(status)) log.info("Content of the response is {0}".format(content)) log.info ("Header of the response is {0}".format(header)) return status def set_downgrade_storage_mode_with_rest(self, downgrade=True, username="Administrator", password="password"): authorization = self.get_authorization(username, password) if downgrade: api = self.index_baseUrl + 'settings/storageMode?downgrade=true' else: api = self.index_baseUrl + 'settings/storageMode?downgrade=false' headers = 
{'Content-type': 'application/json','Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers) if not status: raise Exception(content) return json.loads(content) def create_index_with_rest(self, create_info, username="Administrator", password="password"): log.info("CREATE INDEX USING REST WITH PARAMETERS: " + str(create_info)) authorization = self.get_authorization(username, password) api = self.index_baseUrl + 'internal/indexes?create=true' headers = {'Content-type': 'application/json','Authorization': 'Basic %s' % authorization} params = json.loads("{0}".format(create_info).replace('\'', '"').replace('True', 'true').replace('False', 'false')) status, content, header = self._http_request(api, 'POST', headers=headers, params=json.dumps(params).encode("ascii", "ignore")) if not status: raise Exception(content) return json.loads(content) def build_index_with_rest(self, id, username="Administrator", password="password"): credentials = '{}:{}'.format(self.username, self.password) authorization = base64.encodebytes(credentials.encode('utf-8')) authorization = authorization.decode('utf-8').rstrip('\n') api = self.index_baseUrl + 'internal/indexes?build=true' build_info = {'ids': [id]} headers = {'Content-type': 'application/json','Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'PUT', headers=headers, params=json.dumps(build_info)) if not status: raise Exception(content) return json.loads(content) def drop_index_with_rest(self, id, username="Administrator", password="password"): authorization = self.get_authorization(username, password) url = 'internal/index/{0}'.format(id) api = self.index_baseUrl + url headers = {'Content-type': 'application/json','Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'DELETE', headers=headers) if not status: raise Exception(content) def get_all_indexes_with_rest(self, 
username="Administrator", password="password"): credentials = '{}:{}'.format(self.username, self.password) authorization = base64.encodebytes(credentials.encode('utf-8')) authorization = authorization.decode('utf-8').rstrip('\n') url = 'internal/indexes' api = self.index_baseUrl + url headers = {'Content-type': 'application/json','Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) return json.loads(content) def lookup_gsi_index_with_rest(self, id, body, username="Administrator", password="password"): authorization = self.get_authorization(username, password) url = 'internal/index/{0}?lookup=true'.format(id) api = self.index_baseUrl + url headers = {'Content-type': 'application/json','Authorization': 'Basic %s' % authorization} params = json.loads("{0}".format(body).replace('\'', '"').replace('True', 'true').replace('False', 'false')) status, content, header = self._http_request(api, 'GET', headers=headers, params=json.dumps(params).encode("ascii", "ignore")) if not status: raise Exception(content) return json.loads(content) def full_table_scan_gsi_index_with_rest(self, id, body, username="Administrator", password="password"): if "limit" not in list(body.keys()): body["limit"] = 900000 authorization = self.get_authorization(username, password) url = 'internal/index/{0}?scanall=true'.format(id) api = self.index_baseUrl + url headers = {'Content-type': 'application/json','Authorization': 'Basic %s' % authorization} params = json.loads("{0}".format(body).replace('\'', '"').replace('True', 'true').replace('False', 'false')) status, content, header = self._http_request( api, 'GET', headers=headers, params=json.dumps(params).encode("ascii", "ignore")) if not status: raise Exception(content) # Following line is added since the content uses chunked encoding chunkless_content = content.decode().replace("][", ", \n") return json.loads(chunkless_content) def 
range_scan_gsi_index_with_rest(self, id, body, username="Administrator", password="password"): if "limit" not in list(body.keys()): body["limit"] = 300000 authorization = self.get_authorization(username, password) url = 'internal/index/{0}?range=true'.format(id) api = self.index_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} params = json.loads("{0}".format(body).replace( '\'', '"').replace('True', 'true').replace('False', 'false')) status, content, header = self._http_request( api, 'GET', headers=headers, params=json.dumps(params).encode("ascii", "ignore")) if not status: raise Exception(content) #Below line is there because of MB-20758 content = content.split(b'[]')[0].decode() # Following line is added since the content uses chunked encoding chunkless_content = content.decode().replace("][", ", \n") return json.loads(chunkless_content) def multiscan_for_gsi_index_with_rest(self, id, body, username="Administrator", password="password"): authorization = self.get_authorization(username, password) url = 'internal/index/{0}?multiscan=true'.format(id) api = self.index_baseUrl + url headers = {'Accept': 'application/json','Authorization': 'Basic %s' % authorization} params = json.loads("{0}".format(body).replace('\'', '"').replace( 'True', 'true').replace('False', 'false').replace( "~[]{}UnboundedtruenilNA~", "~[]{}UnboundedTruenilNA~")) params = json.dumps(params).encode("ascii", "ignore").decode().replace("\\\\", "\\") log.info(json.dumps(params).encode("ascii", "ignore")) status, content, header = self._http_request(api, 'GET', headers=headers, params=params) if not status: raise Exception(content) #Below line is there because of MB-20758 content = content.split(b'[]')[0].decode() # Following line is added since the content uses chunked encoding chunkless_content = content.replace("][", ", \n") if chunkless_content: return json.loads(chunkless_content) else: return content def 
multiscan_count_for_gsi_index_with_rest(self, id, body, username="Administrator", password="password"): authorization = self.get_authorization(username, password) url = 'internal/index/{0}?multiscancount=true'.format(id) api = self.index_baseUrl + url headers = {'Accept': 'application/json','Authorization': 'Basic %s' % authorization} count_cmd_body = body.replace('\'', '"').replace('True', 'true').replace('False', 'false') count_cmd_body = count_cmd_body.replace("~[]{}UnboundedtruenilNA~", "~[]{}UnboundedTruenilNA~") params = json.loads(count_cmd_body) params = json.dumps(params).encode("ascii", "ignore").decode().replace("\\\\", "\\") log.info(json.dumps(params).encode("ascii", "ignore")) status, content, header = self._http_request(api, 'GET', headers=headers, params=params) if not status: raise Exception(content) #Below line is there because of MB-20758 content = content.split(b'[]')[0].decode() # Following line is added since the content uses chunked encoding chunkless_content = content.replace("][", ", \n") if chunkless_content: return json.loads(chunkless_content) else: return content 'Get list of all roles that exist in the system' def retrive_all_user_role(self): url = "/settings/rbac/roles" api = self.baseUrl + url status, content, header = self._http_request(api, 'GET') if not status: raise Exception(content) return json.loads(content) 'Get list of current users and rols assigned to them' def retrieve_user_roles(self): url = "/settings/rbac/users" api = self.baseUrl + url status, content, header = self._http_request(api, 'GET') if not status: raise Exception(content) return json.loads(content) ''' Add/Update user role assignment user_id=userid of the user to act on payload=name=<nameofuser>&roles=admin,cluster_admin''' def set_user_roles(self, user_id, payload): url = "settings/rbac/users/" + user_id api = self.baseUrl + url status, content, header = self._http_request(api, 'PUT', payload) if not status: raise Exception(content) return 
json.loads(content) ''' Delete user from couchbase role assignment user_id=userid of user to act on''' def delete_user_roles(self, user_id): url = "settings/rbac/users/local/" + user_id api = self.baseUrl + url status, content, header = self._http_request(api, 'DELETE') if not status: raise Exception(content) return json.loads(content) ''' Returns base64 string of username:password ''' def get_authorization(self, username, password): credentials = '{}:{}'.format(username, password) authorization = base64.encodebytes(credentials.encode('utf-8')) return authorization.decode('utf-8').rstrip('\n') ''' Return list of permission with True/False if user has permission or not user_id = userid for checking permission password = password for userid permission_set=cluster.bucket[default].stats!read,cluster.bucket[default]!write ''' def check_user_permission(self, user_id, password, permission_set): url = "pools/default/checkPermissions/" api = self.baseUrl + url authorization = self.get_authorization(user_id, password) header = {'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'Basic %s' % authorization, 'Accept': '*/*'} status, content, header = self._http_request(api, 'POST', params=permission_set, headers=header) if not status: raise Exception(content) return json.loads(content) ''' Add/Update user role assignment user_id=userid of the user to act on payload=name=<nameofuser>&roles=admin,cluster_admin&password=<password> if roles=<empty> user will be created with no roles''' def add_set_builtin_user(self, user_id, payload): url = "settings/rbac/users/local/" + user_id api = self.baseUrl + url status, content, header = self._http_request(api, 'PUT', payload) if not status: raise Exception(content) return json.loads(content) ''' Add External User ''' def add_external_user(self,user_id,payload): url = "settings/rbac/users/external/" + user_id api = self.baseUrl + url status, content, header = self._http_request(api, 'PUT', payload) if not status: raise 
Exception(content) return json.loads(content) ''' Delete External User ''' def delete_external_user(self,user_id): url = "settings/rbac/users/external/" + user_id api = self.baseUrl + url status, content, header = self._http_request(api, 'DELETE') if not status: raise Exception(content) return json.loads(content) ''' Delete built-in user ''' def delete_builtin_user(self, user_id): url = "settings/rbac/users/local/" + user_id api = self.baseUrl + url status, content, header = self._http_request(api, 'DELETE') if not status: raise Exception(content) return json.loads(content) ''' Add/Update user role assignment user_id=userid of the user to act on password=<new password>''' def change_password_builtin_user(self, user_id, password): url = "controller/changePassword/" + user_id api = self.baseUrl + url status, content, header = self._http_request(api, 'POST', password) if not status: raise Exception(content) return json.loads(content) # Applicable to eventing service ''' Eventing lifecycle operation ''' def lifecycle_operation(self, name, operation,body=None): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name +"/"+ operation api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} if body != None: status, content, header = self._http_request(api, 'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) else: status, content, header = self._http_request(api, 'POST', headers=headers) if not status: raise Exception(content) return content ''' Save the Function so that it is visible in UI ''' def save_function(self, name, body): authorization = self.get_authorization(self.username, self.password) url = "_p/event/saveAppTempStore/?name=" + name api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic 
%s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) if not status: raise Exception(content) return content ''' Deploy the Function ''' def deploy_function(self, name, body): authorization = self.get_authorization(self.username, self.password) url = "_p/event/setApplication/?name=" + name api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) if not status: raise Exception(content) return content ''' GET all the Functions ''' def get_all_functions(self): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) return content ''' Undeploy the Function ''' def set_settings_for_function(self, name, body): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name +"/settings" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) if not status: raise Exception(content) return content ''' deploy the Function ''' def deploy_function_by_name(self, name): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = 
self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name + "/settings" body = {"deployment_status": True, "processing_status": True} api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) if not status: raise Exception(content) return content ''' pause the Function ''' def pause_function_by_name(self, name): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name + "/settings" body = {"deployment_status": True, "processing_status": False} api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) if not status: raise Exception(content) return content ''' undeploy the Function ''' def undeploy_function(self, name): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name +"/settings" body= {"deployment_status": False, "processing_status": False} api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) if not status: raise Exception(content) return content ''' Delete all the functions ''' def delete_all_function(self): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) 
url = "api/v1/functions" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'DELETE', headers=headers) if not status: raise Exception(content) return content ''' Delete single function ''' def delete_single_function(self, name): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'DELETE', headers=headers) if not status: raise Exception(content) return content ''' Delete the Function from UI ''' def delete_function_from_temp_store(self, name): authorization = self.get_authorization(self.username, self.password) url = "_p/event/deleteAppTempStore/?name=" + name api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'DELETE', headers=headers) if not status: raise Exception(content) return content ''' Delete the Function ''' def delete_function(self, name): authorization = self.get_authorization(self.username, self.password) url = "_p/event/deleteApplication/?name=" + name api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'DELETE', headers=headers) if not status: raise Exception(content) return content ''' Export the Function ''' def export_function(self, name): export_map = {} if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/export/" + name api = self.eventing_baseUrl + 
url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) if status: json_parsed = json.loads(content) for key in list(json_parsed[0].keys()): # returns an array tokens = key.split(":") val = json_parsed[0][key] if len(tokens) == 1: field = tokens[0] export_map[field] = val return export_map ''' Import the Function ''' def import_function(self, body): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/import" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=body) if not status: raise Exception(content) return content ''' Ensure that the eventing node is out of bootstrap node ''' def get_deployed_eventing_apps(self): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "getDeployedApps" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) return json.loads(content) ''' Ensure that the eventing node is out of bootstrap node ''' def get_running_eventing_apps(self): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "getRunningApps" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', 
headers=headers) if not status: raise Exception(content) return json.loads(content) ''' composite status of a handler ''' def get_composite_eventing_status(self): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/status" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) return json.loads(content) ''' Get Eventing processing stats ''' def get_event_processing_stats(self, name, eventing_map=None): if eventing_map is None: eventing_map = {} if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "getEventProcessingStats?name=" + name api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if status: json_parsed = json.loads(content) for key in list(json_parsed.keys()): tokens = key.split(":") val = json_parsed[key] if len(tokens) == 1: field = tokens[0] eventing_map[field] = val return eventing_map ''' Get Aggregate Eventing processing stats ''' def get_aggregate_event_processing_stats(self, name, eventing_map=None): if eventing_map is None: eventing_map = {} if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "getAggEventProcessingStats?name=" + name api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if status: json_parsed = 
json.loads(content) for key in list(json_parsed.keys()): tokens = key.split(":") val = json_parsed[key] if len(tokens) == 1: field = tokens[0] eventing_map[field] = val return eventing_map ''' Get Eventing execution stats ''' def get_event_execution_stats(self, name, eventing_map=None): if eventing_map is None: eventing_map = {} if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "getExecutionStats?name=" + name api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if status: json_parsed = json.loads(content) for key in list(json_parsed.keys()): tokens = key.split(":") val = json_parsed[key] if len(tokens) == 1: field = tokens[0] eventing_map[field] = val return eventing_map ''' Get Eventing failure stats ''' def get_event_failure_stats(self, name, eventing_map=None): if eventing_map is None: eventing_map = {} if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "getFailureStats?name=" + name api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if status: json_parsed = json.loads(content) for key in list(json_parsed.keys()): tokens = key.split(":") val = json_parsed[key] if len(tokens) == 1: field = tokens[0] eventing_map[field] = val return eventing_map ''' Get all eventing stats ''' def get_all_eventing_stats(self, seqs_processed=False, eventing_map=None): if eventing_map is None: eventing_map = {} if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = 
self.get_authorization(self.username, self.password) if seqs_processed: url = "api/v1/stats?type=full" else: url = "api/v1/stats" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) return json.loads(content) ''' Cleanup eventing ''' def cleanup_eventing(self): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "cleanupEventing" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) return content ''' enable debugger ''' def enable_eventing_debugger(self): authorization = self.get_authorization(self.username, self.password) url = "_p/event/api/v1/config" api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} body="{\"enable_debugger\": true}" status, content, header = self._http_request(api, 'POST', headers=headers, params=body) if not status: raise Exception(content) return content ''' disable debugger ''' def disable_eventing_debugger(self): authorization = self.get_authorization(self.username, self.password) url = "_p/event/api/v1/config" api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} body = "{\"enable_debugger\": false}" status, content, header = self._http_request(api, 'POST', headers=headers, params=body) if not status: raise Exception(content) return content ''' Start debugger ''' def start_eventing_debugger(self, name): authorization = self.get_authorization(self.username, self.password) url="/pools/default" api = self.baseUrl + url headers = 
{'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) url = "_p/event/startDebugger/?name=" + name api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=content) if not status: raise Exception(content) return content ''' Stop debugger ''' def stop_eventing_debugger(self, name): authorization = self.get_authorization(self.username, self.password) url = "_p/event/stopDebugger/?name=" + name api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers) if not status: raise Exception(content) return content ''' Get debugger url ''' def get_eventing_debugger_url(self, name): authorization = self.get_authorization(self.username, self.password) url = "_p/event/getDebuggerUrl/?name=" + name api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers) if not status: raise Exception(content) return content ''' allow inter bucket recursion ''' def allow_interbucket_recursion(self): authorization = self.get_authorization(self.username, self.password) url = "_p/event/api/v1/config" api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} body = "{\"allow_interbucket_recursion\": true}" status, content, header = self._http_request(api, 'POST', headers=headers, params=body) if not status: raise Exception(content) return content ''' update eventing config ''' def update_eventing_config(self,body): authorization = self.get_authorization(self.username, self.password) url = "_p/event/api/v1/config" api = self.baseUrl + url 
headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=body) if not status: raise Exception(content) return content ''' GET eventing config ''' def get_eventing_config(self): authorization = self.get_authorization(self.username, self.password) url = "_p/event/api/v1/config" api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers, params='') if not status: raise Exception(content) return content ''' update eventing config function wise ''' def update_eventing_config_per_function(self, body, name): authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name + "/config" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) if not status: raise Exception(content) return content ''' GET eventing config for single function ''' def get_eventing_config_per_function(self, name): authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name + "/config" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers, params='') if not status: raise Exception(content) return content ''' Update function appcode ''' def update_function_appcode(self, body, name): authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name + "/appcode" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = 
self._http_request(api, 'POST', headers=headers, params=body) if not status: raise Exception(content) return content ''' Get function appcode ''' def get_function_appcode(self, name): authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name + "/appcode" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers, params='') if not status: raise Exception(content) return content ''' Get eventing rebalance status ''' def get_eventing_rebalance_status(self): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "getAggRebalanceStatus" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if status: return content ''' Get application logs ''' def get_app_logs(self,handler_name): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "getAppLog?aggregate=true&name="+handler_name api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if status: return content def create_function(self, name, body): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 
'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) if not status: raise Exception(content) return content def update_function(self, name, body): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} body['appname']=name status, content, header = self._http_request(api, 'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) if not status: raise Exception(content) return content def get_function_details(self, name): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) return content def get_eventing_go_routine_dumps(self): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "debug/pprof/goroutine?debug=1" api = self.eventing_baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) return content def set_eventing_retry(self, name, body): if self.eventing_role: authorization = self.get_authorization("eventing_admin", "password") else: authorization = self.get_authorization(self.username, self.password) url = "api/v1/functions/" + name + "/retry" api = self.eventing_baseUrl + url headers = 
{'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers, params=json.dumps(body).encode("ascii", "ignore")) if not status: raise Exception(content) return content def get_user(self, user_id): url = "settings/rbac/users/" api = self.baseUrl + url status, content, header = self._http_request(api, "GET") if content is not None: content_json = json.loads(content) for i in range(len(content_json)): user = content_json[i] if user.get('id') == user_id: return user return {} """ From 6.5.0, enable IPv6 on cluster/node needs 2 settings default is set to IPv6 We need to disable auto failover first, then set network version Then enable autofaiover again. """ def enable_ip_version(self, afamily='ipv6', afamilyOnly='false'): log.info("Start enable {0} on this node {1}".format(afamily, self.baseUrl)) self.update_autofailover_settings(False, 60) params = urllib.parse.urlencode({'afamily': afamily, 'afamilyOnly': afamilyOnly, 'nodeEncryption': 'off'}) api = "{0}node/controller/enableExternalListener".format(self.baseUrl) status, content, header = self._http_request(api, 'POST', params) if status: params = urllib.parse.urlencode({'afamily': afamily, 'afamilyOnly': afamilyOnly, 'nodeEncryption': 'off'}) api = "{0}node/controller/setupNetConfig".format(self.baseUrl) status, content, header = self._http_request(api, 'POST', params) if status: log.info("Done enable {0} on this node {1}".format(afamily, self.baseUrl)) else: log.error("Failed to set 'setupNetConfig' on this node {0}" .format(self.baseUrl)) raise Exception(content) else: log.error("Failed to set 'enableExternalListener' on this node {0}" .format(self.baseUrl)) raise Exception(content) if afamilyOnly == 'true': api = "{0}node/controller/disableUnusedExternalListeners".format(self.baseUrl) status, _, _ = self._http_request(api, 'POST', params) if not status: log.error("Failed to set 'disableUnusedExternalListeners' on this 
node {0}" .format(self.baseUrl)) self.update_autofailover_settings(True, 60) # These methods are added for Auto-Rebalance On Failure tests def set_retry_rebalance_settings(self, body): url = "settings/retryRebalance" api = self.baseUrl + url params = urllib.parse.urlencode(body) headers = self._create_headers() status, content, header = self._http_request(api, 'POST', headers=headers, params=params) if not status: raise Exception(content) return content def get_retry_rebalance_settings(self): authorization = self.get_authorization(self.username, self.password) url = "settings/retryRebalance" api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) return content def get_pending_rebalance_info(self): authorization = self.get_authorization(self.username, self.password) url = "pools/default/pendingRetryRebalance" api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'GET', headers=headers) if not status: raise Exception(content) return content def cancel_pending_rebalance(self, id): authorization = self.get_authorization(self.username, self.password) url = "controller/cancelRebalanceRetry/" + str(id) api = self.baseUrl + url headers = {'Content-type': 'application/json', 'Authorization': 'Basic %s' % authorization} status, content, header = self._http_request(api, 'POST', headers=headers) if not status: raise Exception(content) return content # Upload a root certificate def upload_cluster_ca(self, certificate): """ Upload a certificate the cluster This can be a root certificate or an intermediate certificate. 
""" headers = self._create_capi_headers() headers['Content-Type'] = 'application/octet-stream' status, content, header = self._http_request(self.baseUrl + "controller/uploadClusterCA", 'POST', headers=headers, params=certificate) return status, content def load_trusted_CAs(self): """ Instructs the cluster to load trusted CAs(.pem files) from the node's inbox/CA folder """ status, content, header = self._http_request(self.baseUrl + "node/controller/loadTrustedCAs", 'POST') return status, content def reload_certificate(self, params=''): """ Reload certificate Call this function after uploading a certificate to the cluster to activate the new certificate. """ headers = self._create_capi_headers() status, content, header = self._http_request(self.baseUrl + "node/controller/reloadCertificate", 'POST', headers=headers, params=params) return status, content def get_trusted_CAs(self): """ Get all (default + uploaded) trusted CA certs information """ status, content, header = self._http_request(self.baseUrl + "/pools/default/trustedCAs", 'GET') return status, content def client_cert_auth(self, state, prefixes): """ Args: state (str): Either 'enable', 'mandatory' or 'disable'. prefixes (list(dict)): A list of dicts of containing the keys 'path', 'prefix' and 'delimiter' e.g. 
{"path": .., "prefix": .., "delimiter", ..} """ headers = self._create_capi_headers() params = json.dumps({'state': state, 'prefixes': prefixes}) status, content, header = self._http_request(self.baseUrl + "settings/clientCertAuth", 'POST', headers=headers, params=params) return status, content class MembaseServerVersion: def __init__(self, implementationVersion='', componentsVersion=''): self.implementationVersion = implementationVersion self.componentsVersion = componentsVersion # this class will also contain more node related info class OtpNode(object): def __init__(self, id='', status=''): self.id = id self.ip = '' self.replication = '' self.port = CbServer.port if CbServer.use_https: self.port = CbServer.ssl_port self.gracefulFailoverPossible = 'true' # extract ns ip from the otpNode string # its normally ns_1@10.20.30.40 if id.find('@') >= 0: self.ip = id[id.index('@') + 1:] if self.ip.count(':') > 0: # raw ipv6? enclose in square brackets self.ip = '[' + self.ip + ']' self.status = status class NodeInfo(object): def __init__(self): self.availableStorage = None # list self.memoryQuota = None class NodeDataStorage(object): def __init__(self): self.type = '' # hdd or ssd self.path = '' self.index_path = '' self.quotaMb = '' self.state = '' # ok def __str__(self): return '{0}'.format({'type': self.type, 'path': self.path, 'index_path' : self.index_path, 'quotaMb': self.quotaMb, 'state': self.state}) def get_data_path(self): return self.path def get_index_path(self): return self.index_path class NodeDiskStorage(object): def __init__(self): self.type = 0 self.path = '' self.sizeKBytes = 0 self.usagePercent = 0 class Bucket(object): def __init__(self, bucket_size='', name="", num_replicas=0, port=11211, master_id=None, type='', eviction_policy="valueOnly", bucket_priority=None, uuid="", lww=False, maxttl=None, bucket_storage=None): self.name = name self.port = port self.type = type self.nodes = None self.stats = None self.servers = [] self.vbuckets = [] 
self.forward_map = [] self.numReplicas = num_replicas self.bucket_size = bucket_size self.kvs = {1:KVStore()} self.master_id = master_id self.eviction_policy = eviction_policy self.bucket_priority = bucket_priority self.uuid = uuid self.lww = lww self.maxttl = maxttl self.bucket_storage = bucket_storage def __str__(self): return self.name class Node(object): def __init__(self): self.uptime = 0 self.memoryTotal = 0 self.memoryFree = 0 self.mcdMemoryReserved = 0 self.mcdMemoryAllocated = 0 self.status = "" self.hostname = "" self.clusterCompatibility = "" self.clusterMembership = "" self.recoveryType = "" self.version = "" self.os = "" self.ports = [] self.availableStorage = [] self.storage = [] self.memoryQuota = 0 self.moxi = 11211 self.memcached = 11210 self.id = "" self.ip = "" self.rest_username = "" self.rest_password = "" self.port = 8091 if CbServer.use_https: self.port = CbServer.ssl_port self.services = [] self.storageTotalRam = 0 @property def failed_over_state_a(self): """ The state in which a node is failed-over and is requesting a recovery type from the user """ return self.clusterMembership == "inactiveFailed" @property def failed_over_state_b(self): """ The state in which a node is failed-over and the user has selected a recovery type """ return self.clusterMembership == "inactiveAdded" and self.recoveryType @property def has_failed_over(self): """ Returns tree if a node is in the failed-over state """ return self.failed_over_state_a or self.failed_over_state_b @property def complete_version(self): """ Returns the complete version of the node (e.g. 6.5.0) """ return self.version.split('-')[0] @property def major_version(self): """ Returns the major version of the node (e.g. 6.5) """ return self.complete_version.rsplit('.', 1)[0] @property def minor_version(self): """ Returns the minor version of the node (e.g. 
0) """ return self.complete_version.rsplit('.', 1)[1] class AutoFailoverSettings(object): def __init__(self): self.enabled = True self.timeout = 0 self.count = 0 self.failoverOnDataDiskIssuesEnabled = False self.failoverOnDataDiskIssuesTimeout = 0 self.maxCount = 1 self.failoverServerGroup = False self.can_abort_rebalance = False class AutoReprovisionSettings(object): def __init__(self): self.enabled = True self.max_nodes = 0 self.count = 0 class NodePort(object): def __init__(self): self.proxy = 0 self.direct = 0 class BucketStats(object): def __init__(self): self.opsPerSec = 0 self.itemCount = 0 self.diskUsed = 0 self.memUsed = 0 self.ram = 0 class vBucket(object): def __init__(self): self.master = '' self.replica = [] self.id = -1 class RestParser(object): def parse_index_status_response(self, parsed): index_map = {} for map in parsed["indexes"]: bucket_name = map['bucket'] if bucket_name not in list(index_map.keys()): index_map[bucket_name] = {} index_name = map['index'] index_map[bucket_name][index_name] = {} index_map[bucket_name][index_name]['status'] = map['status'] index_map[bucket_name][index_name]['progress'] = str(map['progress']) index_map[bucket_name][index_name]['definition'] = map['definition'] if len(map['hosts']) == 1: index_map[bucket_name][index_name]['hosts'] = map['hosts'][0] else: index_map[bucket_name][index_name]['hosts'] = map['hosts'] index_map[bucket_name][index_name]['id'] = map['id'] return index_map def parse_index_stats_response(self, parsed, index_map=None): if index_map == None: index_map = {} for key in list(parsed.keys()): tokens = key.split(":") val = parsed[key] if len(tokens) == 3 and 'MAINT_STREAM' not in tokens[0] and 'INIT_STREAM' not in tokens[0]: bucket = tokens[0] index_name = tokens[1] stats_name = tokens[2] if bucket not in list(index_map.keys()): index_map[bucket] = {} if index_name not in list(index_map[bucket].keys()): index_map[bucket][index_name] = {} index_map[bucket][index_name][stats_name] = val return 
index_map def parse_index_stats_response_collections(self, parsed, index_map=None): if index_map == None: index_map = {} for key in list(parsed.keys()): tokens = key.split(":") val = parsed[key] if len(tokens) == 3 and 'MAINT_STREAM' not in tokens[0] and 'INIT_STREAM' not in tokens[0]: bucket = tokens[0] index_name = tokens[1] stats_name = tokens[2] if bucket not in list(index_map.keys()): index_map[bucket] = {} if index_name not in list(index_map[bucket].keys()): index_map[bucket][index_name] = {} index_map[bucket][index_name][stats_name] = val elif len(tokens) == 5 and 'MAINT_STREAM' not in tokens[0] and 'INIT_STREAM' not in tokens[0]: bucket = tokens[0] scope_name = tokens[1] collection_name = tokens[2] index_name = tokens[3] stats_name = tokens[4] keyspace = f'default:{bucket}.{scope_name}.{collection_name}' if keyspace not in list(index_map.keys()): index_map[keyspace] = {} if index_name not in list(index_map[keyspace].keys()): index_map[keyspace][index_name] = {} index_map[keyspace][index_name][stats_name] = val return index_map def parse_get_nodes_response(self, parsed): node = Node() node.uptime = parsed['uptime'] node.memoryFree = parsed['memoryFree'] node.memoryTotal = parsed['memoryTotal'] node.mcdMemoryAllocated = parsed['mcdMemoryAllocated'] node.mcdMemoryReserved = parsed['mcdMemoryReserved'] node.status = parsed['status'] node.hostname = parsed['hostname'] node.clusterCompatibility = parsed['clusterCompatibility'] node.clusterMembership = parsed['clusterMembership'] if 'recoveryType' in parsed: node.recoveryType = parsed['recoveryType'] node.version = parsed['version'] node.curr_items = 0 if 'interestingStats' in parsed and 'curr_items' in parsed['interestingStats']: node.curr_items = parsed['interestingStats']['curr_items'] node.port = parsed["hostname"][parsed["hostname"].rfind(":") + 1:] if CbServer.use_https: str_node_port = CbServer.ssl_port_map.get(str(node.port), str(node.port)) if type(node.port) == int: node.port = int(str_node_port) node.os 
= parsed['os'] if "services" in parsed: node.services = parsed["services"] if "otpNode" in parsed: node.id = parsed["otpNode"] if "hostname" in parsed: # should work for both: ipv4 and ipv6 node.ip = parsed["hostname"].rsplit(":", 1)[0] # memoryQuota if 'memoryQuota' in parsed: node.memoryQuota = parsed['memoryQuota'] if 'availableStorage' in parsed: availableStorage = parsed['availableStorage'] for key in availableStorage: # let's assume there is only one disk in each noce dict_parsed = parsed['availableStorage'] if 'path' in dict_parsed and 'sizeKBytes' in dict_parsed and 'usagePercent' in dict_parsed: diskStorage = NodeDiskStorage() diskStorage.path = dict_parsed['path'] diskStorage.sizeKBytes = dict_parsed['sizeKBytes'] diskStorage.type = key diskStorage.usagePercent = dict_parsed['usagePercent'] node.availableStorage.append(diskStorage) log.info(diskStorage) if 'storage' in parsed: storage = parsed['storage'] for key in storage: disk_storage_list = storage[key] for dict_parsed in disk_storage_list: if 'path' in dict_parsed and 'state' in dict_parsed and 'quotaMb' in dict_parsed: dataStorage = NodeDataStorage() dataStorage.path = dict_parsed['path'] dataStorage.index_path = dict_parsed.get('index_path', '') dataStorage.quotaMb = dict_parsed['quotaMb'] dataStorage.state = dict_parsed['state'] dataStorage.type = key node.storage.append(dataStorage) # ports":{"proxy":11211,"direct":11210} if "ports" in parsed: ports = parsed["ports"] if "proxy" in ports: node.moxi = ports["proxy"] if "direct" in ports: node.memcached = ports["direct"] if CbServer.use_https: node.memcached = int(CbServer.ssl_port_map.get(str(node.memcached), str(node.memcached))) if "storageTotals" in parsed: storageTotals = parsed["storageTotals"] if storageTotals.get("ram"): if storageTotals["ram"].get("total"): ramKB = storageTotals["ram"]["total"] node.storageTotalRam = ramKB//(1024*1024) if node.mcdMemoryReserved == 0: node.mcdMemoryReserved = node.storageTotalRam if IS_CONTAINER: # the 
storage total values are more accurate than # mcdMemoryReserved - which is container host memory node.mcdMemoryReserved = node.storageTotalRam * 0.70 return node def parse_get_bucket_response(self, response): parsed = json.loads(response) return self.parse_get_bucket_json(parsed) def parse_get_bucket_json(self, parsed): bucket = Bucket() bucket.name = parsed['name'] bucket.uuid = parsed['uuid'] bucket.type = parsed['bucketType'] if 'proxyPort' in parsed: bucket.port = parsed['proxyPort'] bucket.nodes = list() if 'vBucketServerMap' in parsed: vBucketServerMap = parsed['vBucketServerMap'] serverList = vBucketServerMap['serverList'] bucket.servers.extend(serverList) if "numReplicas" in vBucketServerMap: bucket.numReplicas = vBucketServerMap["numReplicas"] # vBucketMapForward if 'vBucketMapForward' in vBucketServerMap: # let's gather the forward map vBucketMapForward = vBucketServerMap['vBucketMapForward'] counter = 0 for vbucket in vBucketMapForward: # there will be n number of replicas vbucketInfo = vBucket() vbucketInfo.master = serverList[vbucket[0]] if vbucket: for i in range(1, len(vbucket)): if vbucket[i] != -1: vbucketInfo.replica.append(serverList[vbucket[i]]) vbucketInfo.id = counter counter += 1 bucket.forward_map.append(vbucketInfo) vBucketMap = vBucketServerMap['vBucketMap'] counter = 0 for vbucket in vBucketMap: # there will be n number of replicas vbucketInfo = vBucket() vbucketInfo.master = serverList[vbucket[0]] if vbucket: for i in range(1, len(vbucket)): if vbucket[i] != -1: vbucketInfo.replica.append(serverList[vbucket[i]]) vbucketInfo.id = counter counter += 1 bucket.vbuckets.append(vbucketInfo) # now go through each vbucket and populate the info # who is master , who is replica # get the 'storageTotals' log.debug('read {0} vbuckets'.format(len(bucket.vbuckets))) stats = parsed['basicStats'] # vBucketServerMap bucketStats = BucketStats() log.debug('stats:{0}'.format(stats)) bucketStats.opsPerSec = stats['opsPerSec'] bucketStats.itemCount = 
stats['itemCount'] if bucket.type != "memcached": bucketStats.diskUsed = stats['diskUsed'] bucketStats.memUsed = stats['memUsed'] quota = parsed['quota'] bucketStats.ram = quota['ram'] bucket.stats = bucketStats nodes = parsed['nodes'] for nodeDictionary in nodes: node = Node() node.uptime = nodeDictionary['uptime'] node.memoryFree = nodeDictionary['memoryFree'] node.memoryTotal = nodeDictionary['memoryTotal'] node.mcdMemoryAllocated = nodeDictionary['mcdMemoryAllocated'] node.mcdMemoryReserved = nodeDictionary['mcdMemoryReserved'] node.status = nodeDictionary['status'] node.hostname = nodeDictionary['hostname'] if 'clusterCompatibility' in nodeDictionary: node.clusterCompatibility = nodeDictionary['clusterCompatibility'] if 'clusterMembership' in nodeDictionary: node.clusterCompatibility = nodeDictionary['clusterMembership'] node.version = nodeDictionary['version'] node.os = nodeDictionary['os'] if "ports" in nodeDictionary: ports = nodeDictionary["ports"] if "proxy" in ports: node.moxi = ports["proxy"] if "direct" in ports: node.memcached = ports["direct"] if CbServer.use_https: node.memcached = int(CbServer.ssl_port_map.get(str(node.memcached), str(node.memcached))) if "hostname" in nodeDictionary: value = str(nodeDictionary["hostname"]) node.ip = value[:value.rfind(":")] node.port = int(value[value.rfind(":") + 1:]) if CbServer.use_https: node.port = int(CbServer.ssl_port_map.get(str(node.port), str(node.port))) if "otpNode" in nodeDictionary: node.id = nodeDictionary["otpNode"] bucket.nodes.append(node) return bucket
#
# Enumerations for Sony camera control.  For each feature there are two
# tables: *CpEnums* (the number the camera API/GUI assigns to each option)
# and per-model *Options* (the index of that option in the model's own
# capability list).
#
import enum
import re

# --------------- each camera has a set of enumerations for each functionality and its available options --------------------------------------

# =========================== White Balance ==============================================================

class sonyClassWhiteBalanceCpEnums(enum.IntEnum):
    # API-side white balance codes.
    CrWhiteBalance_AWB = 0
    CrWhiteBalance_Underwater_Auto = 1
    CrWhiteBalance_Daylight = 17
    CrWhiteBalance_Shadow = 18
    CrWhiteBalance_Cloudy = 19
    CrWhiteBalance_Tungsten = 20
    CrWhiteBalance_Fluorescent = 32
    CrWhiteBalance_Fluorescent_WarmWhite = 33
    CrWhiteBalance_Fluorescent_CoolWhite = 34
    CrWhiteBalance_Fluorescent_DayWhite = 35
    CrWhiteBalance_Fluorescent_Daylight = 36
    CrWhiteBalance_Flush = 48
    CrWhiteBalance_ColorTemp = 256
    CrWhiteBalance_Custom_1 = 257
    CrWhiteBalance_Custom_2 = 258
    CrWhiteBalance_Custom_3 = 259
    CrWhiteBalance_Custom = 260

#
# consider grabbing all params from camera and writing these automatically (its what list is pulled out for each camera)
# This is the capability of the model
#
class sonyClassAlpha7WhiteBalanceOptions(enum.IntEnum):
    # Position of each white balance option in the Alpha 7 capability list.
    CrWhiteBalance_AWB = 0
    CrWhiteBalance_Underwater_Auto = 10
    CrWhiteBalance_Daylight = 1
    CrWhiteBalance_Shadow = 2
    CrWhiteBalance_Cloudy = 3
    CrWhiteBalance_Tungsten = 4
    CrWhiteBalance_Fluorescent_WarmWhite = 5
    CrWhiteBalance_Fluorescent_CoolWhite = 6
    CrWhiteBalance_Fluorescent_DayWhite = 7
    CrWhiteBalance_Fluorescent_Daylight = 8
    CrWhiteBalance_Flush = 9
    CrWhiteBalance_ColorTemp = 11
    CrWhiteBalance_Custom_1 = 12
    CrWhiteBalance_Custom_2 = 13
    CrWhiteBalance_Custom_3 = 14
    # test unit only
    #CrWhiteBalance_Custom = 15

# =========================== Focus Mode ==============================================================

class sonyClassFocusModeCpEnums(enum.IntEnum):
    CrFocus_MF = 1
    CrFocus_AF_S = 2
    CrFocus_AF_C = 3
    CrFocus_AF_A = 4
    CrFocus_AF_D = 5
    CrFocus_DMF = 6
    CrFocus_PF = 7

class sonyClassAlpha7FocusModeOptions(enum.IntEnum):
    CrFocus_MF = 4
    CrFocus_AF_S = 0
    CrFocus_AF_C = 2
    CrFocus_AF_A = 1
    CrFocus_AF_D = 5
    CrFocus_DMF = 3

# =========================== Aperture ==============================================================

class sonyClassApertureCpEnums(enum.IntEnum):
    # API code = f-number x 100 (e.g. F2.8 -> 280).
    F2_5 = 250
    F2_8 = 280
    F3_2 = 320
    F3_5 = 350
    F4_0 = 400
    F4_5 = 450
    F5_0 = 500
    F5_6 = 560
    F6_3 = 630
    F7_1 = 710
    F8_0 = 800
    F9_0 = 900
    F10 = 1000
    F11 = 1100
    F13 = 1300
    F14 = 1400
    F16 = 1600
    F18 = 1800
    F20 = 2000
    F22 = 2200

class sonyClassAlpha7ApertureOptions(enum.IntEnum):
    F2_5 = 0
    F2_8 = 1
    F3_2 = 2
    F3_5 = 3
    F4_0 = 4
    F4_5 = 5
    F5_0 = 6
    F5_6 = 7
    F6_3 = 8
    F7_1 = 9
    F8_0 = 10
    F9_0 = 11
    F10 = 12
    F11 = 13
    F13 = 14
    F14 = 15
    F16 = 16
    F18 = 17
    F20 = 18
    F22 = 19

# =========================== Iso ==============================================================

class sonyClassIsoCpEnums(enum.IntEnum):
    # API code = the ISO value itself (0 = auto).
    ISO0 = 0
    ISO50 = 50
    ISO64 = 64
    ISO80 = 80
    ISO100 = 100
    ISO125 = 125
    ISO160 = 160
    ISO200 = 200
    ISO250 = 250
    ISO320 = 320
    ISO400 = 400
    ISO500 = 500
    ISO640 = 640
    ISO800 = 800
    ISO1000 = 1000
    ISO1250 = 1250
    ISO1600 = 1600
    ISO2000 = 2000
    ISO2500 = 2500
    ISO3200 = 3200
    ISO4000 = 4000
    ISO5000 = 5000
    ISO6400 = 6400
    ISO8000 = 8000
    ISO10000 = 10000
    ISO12800 = 12800
    ISO16000 = 16000
    ISO20000 = 20000
    ISO25600 = 25600
    ISO32000 = 32000
    ISO40000 = 40000
    ISO51200 = 51200
    ISO64000 = 64000
    ISO80000 = 80000
    ISO102400 = 102400

class sonyClassAlpha7IsoOptions(enum.IntEnum):
    ISO0 = 0
    ISO50 = 1
    ISO64 = 2
    ISO80 = 3
    ISO100 = 4
    ISO125 = 5
    ISO160 = 6
    ISO200 = 7
    ISO250 = 8
    ISO320 = 9
    ISO400 = 10
    ISO500 = 11
    ISO640 = 12
    ISO800 = 13
    ISO1000 = 14
    ISO1250 = 15
    ISO1600 = 16
    ISO2000 = 17
    ISO2500 = 18
    ISO3200 = 19
    ISO4000 = 20
    ISO5000 = 21
    ISO6400 = 22
    ISO8000 = 23
    ISO10000 = 24
    ISO12800 = 25
    ISO16000 = 26
    ISO20000 = 27
    ISO25600 = 28
    ISO32000 = 29
    ISO40000 = 30
    ISO51200 = 31
    ISO64000 = 32
    ISO80000 = 33
    ISO102400 = 34

# =========================== Focus Area ==============================================================

class sonyClassFocusAreaCpEnums(enum.IntEnum):
    CrFocusArea_Unknown = 0
    CrFocusArea_Wide = 1
    CrFocusArea_Zone = 2
    CrFocusArea_Center = 3
    CrFocusArea_Flexible_Spot_S = 4
    CrFocusArea_Flexible_Spot_M = 5
    CrFocusArea_Flexible_Spot_L = 6
    CrFocusArea_Expand_Flexible_Spot = 7
    CrFocusArea_Flexible_Spot = 8
    CrFocusArea_Tracking_Wide = 17
    CrFocusArea_Tracking_Zone = 18
    CrFocusArea_Tracking_Center = 19
    CrFocusArea_Tracking_Flexible_Spot_S = 20
    CrFocusArea_Tracking_Flexible_Spot_M = 21
    CrFocusArea_Tracking_Flexible_Spot_L = 22
    CrFocusArea_Tracking_Expand_Flexible_Spot = 23
    CrFocusArea_Tracking_Flexible_Spot = 24

class sonyClassAlpha7FocusAreaOptions(enum.IntEnum):
    CrFocusArea_Wide = 0
    CrFocusArea_Zone = 1
    CrFocusArea_Center = 2
    CrFocusArea_Flexible_Spot_S = 3
    CrFocusArea_Flexible_Spot_M = 4
    CrFocusArea_Flexible_Spot_L = 5
    CrFocusArea_Expand_Flexible_Spot = 6
    CrFocusArea_Flexible_Spot = 7

# =========================== Exposure Program ==============================================================

class sonyClassExProCpEnums(enum.IntEnum):
    # NOTE(review): CrExposure_Sprots_Action looks like a typo inherited
    # from the vendor SDK header; the name is public API, so it is kept.
    CrExposure_M_Manual = 1
    CrExposure_P_Auto = 2
    CrExposure_A_AperturePriority = 3
    CrExposure_S_ShutterSpeedPriority = 4
    CrExposure_Program_Creative = 5
    CrExposure_Program_Action = 6
    CrExposure_Portrait = 7
    CrExposure_Auto = 32768
    CrExposure_Auto_Plus = 32769
    CrExposure_P_A = 32770
    CrExposure_P_S = 32771
    CrExposure_Sports_Action = 32772
    CrExposure_Sprots_Action = 32773
    CrExposure_Sunset = 32774
    CrExposure_Night = 32775
    CrExposure_Landscape = 32776
    CrExposure_Macro = 32777
    CrExposure_HandheldTwilight = 32778
    CrExposure_NightPortrait = 32779
    CrExposure_AntiMotionBlur = 32780
    CrExposure_Pet = 32781
    CrExposure_Gourmet = 32782
    CrExposure_Fireworks = 32783
    CrExposure_HighSensitivity = 32784
    CrExposure_MemoryRecall = 32800
    CrExposure_ContinuousPriority_AE_8pics = 32817
    CrExposure_ContinuousPriority_AE_10pics = 32818
    CrExposure_ContinuousPriority_AE_12pics = 32819
    CrExposure_3D_SweepPanorama = 32832
    CrExposure_SweepPanorama = 32833
    CrExposure_Movie_P = 32848
    CrExposure_Movie_A = 32849
    CrExposure_Movie_S = 32850
    CrExposure_Movie_M = 32851
    CrExposure_Movie_Auto = 32852
    CrExposure_Movie_SQMotion_P = 32857
    CrExposure_Movie_SQMotion_A = 32858
    CrExposure_Movie_SQMotion_S = 32859
    CrExposure_Movie_SQMotion_M = 32860
    CrExposure_Flash_Off = 32864
    CrExposure_PictureEffect = 32880
    CrExposure_HiFrameRate_P = 32896
    CrExposure_HiFrameRate_A = 32897
    CrExposure_HiFrameRate_S = 32898
    CrExposure_HiFrameRate_M = 32899
    CrExposure_SQMotion_P = 32900
    CrExposure_SQMotion_A = 32901
    CrExposure_SQMotion_S = 32902
    CrExposure_SQMotion_M = 32903
    CrExposure_MOVIE = 32904
    CrExposure_STILL = 32905

class sonyClassAlpha7ExProOptions(enum.IntEnum):
    CrExposure_Movie_S = 2
    CrExposure_Movie_P = 0
    CrExposure_Movie_A = 1
    CrExposure_Movie_M = 3

# =========================== Shutter Speed ==============================================================

class sonyClassShutterSpeedCpEnums(enum.IntEnum):
    # sN   = N seconds; sNdM = N/M seconds (e.g. s1d100 = 1/100 s).
    s0 = 0
    s30 = 19660810
    s25 = 16384010
    s20 = 13107210
    s15 = 9830410
    s13 = 8519690
    s10 = 6553610
    s8 = 5242890
    s6 = 3932170
    s5 = 3276810
    s4 = 2621450
    s3_2 = 2097162
    s2_5 = 1638410
    s2 = 1310730
    s1_6 = 1048586
    s1_3 = 851978
    s1 = 655370
    s0_8 = 524298
    s0_6 = 393226
    s0_5 = 327690
    s0_4 = 262154
    s1d3 = 65539
    s1d4 = 65540
    s1d5 = 65541
    s1d6 = 65542
    s1d8 = 65544
    s1d10 = 65546
    s1d13 = 65549
    s1d15 = 65551
    s1d20 = 65556
    s1d25 = 65561
    s1d30 = 65566
    s1d40 = 65576
    s1d50 = 65586
    s1d60 = 65596
    s1d80 = 65616
    s1d100 = 65636
    s1d125 = 65661
    s1d160 = 65696
    s1d200 = 65736
    s1d250 = 65786
    s1d320 = 65856
    s1d400 = 65936
    s1d500 = 66036
    s1d640 = 66176
    s1d800 = 66336
    s1d1000 = 66536
    s1d1250 = 66786
    s1d1600 = 67136
    s1d2000 = 67536
    s1d2500 = 68036
    s1d3200 = 68736
    s1d4000 = 69536
    s1d5000 = 70536
    s1d6400 = 71936
    s1d8000 = 73536

class sonyClassAlpha7ShutterSpeedOptions(enum.IntEnum):
    s0 = 0
    s30 = 1
    s25 = 2
    s20 = 3
    s15 = 4
    s13 = 5
    s10 = 6
    s8 = 7
    s6 = 8
    s5 = 9
    s4 = 10
    s3_2 = 11
    s2_5 = 12
    s2 = 13
    s1_6 = 14
    s1_3 = 15
    s1 = 16
    s0_8 = 17
    s0_6 = 18
    s0_5 = 19
    s0_4 = 20
    s1d3 = 21
    s1d4 = 22
    s1d5 = 23
    s1d6 = 24
    s1d8 = 25
    s1d10 = 26
    s1d13 = 27
    s1d15 = 28
    s1d20 = 29
    s1d25 = 30
    s1d30 = 31
    s1d40 = 32
    s1d50 = 33
    s1d60 = 34
    s1d80 = 35
    s1d100 = 36
    s1d125 = 37
    s1d160 = 38
    s1d200 = 39
    s1d250 = 40
    s1d320 = 41
    s1d400 = 42
    s1d500 = 43
    s1d640 = 44
    s1d800 = 45
    s1d1000 = 46
    s1d1250 = 47
    s1d1600 = 48
    s1d2000 = 49
    s1d2500 = 50
    s1d3200 = 51
    s1d4000 = 52
    s1d5000 = 53
    s1d6400 = 54
    s1d8000 = 55

# =========================== Still Capture mode ==============================================================

class sonyClassStillCaptureCpEnums(enum.IntEnum):
    CrDrive_Single = 1
    CrDrive_Continuous_Hi = 65537
    CrDrive_Continuous_Hi_Plus = 65538
    CrDrive_Continuous_Hi_Live = 65539
    CrDrive_Continuous_Lo = 65540
    CrDrive_Continuous = 65541
    CrDrive_Continuous_SpeedPriority = 65542
    CrDrive_Continuous_Mid = 65543
    CrDrive_Continuous_Mid_Live = 65544
    CrDrive_Continuous_Lo_Live = 65545
    CrDrive_SingleBurstShooting_lo = 69633
    CrDrive_SingleBurstShooting_mid = 69644
    CrDrive_SingleBurstShooting_hi = 69645
    CrDrive_Timelapse = 131073
    CrDrive_Timer_2s = 196609
    CrDrive_Timer_5s = 196610
    CrDrive_Timer_10s = 196611
    CrDrive_Continuous_Bracket_03Ev_3pics = 262913
    CrDrive_Continuous_Bracket_03Ev_5pics = 262914
    CrDrive_Continuous_Bracket_03Ev_9pics = 262915
    CrDrive_Continuous_Bracket_05Ev_3pics = 262916
    CrDrive_Continuous_Bracket_05Ev_5pics = 262917
    CrDrive_Continuous_Bracket_05Ev_9pics = 262918
    CrDrive_Continuous_Bracket_07Ev_3pics = 262919
    CrDrive_Continuous_Bracket_07Ev_5pics = 262920
    CrDrive_Continuous_Bracket_07Ev_9pics = 262921
    CrDrive_Continuous_Bracket_10Ev_3pics = 262922
    CrDrive_Continuous_Bracket_10Ev_5pics = 262923
    CrDrive_Continuous_Bracket_10Ev_9pics = 262924
    CrDrive_Continuous_Bracket_20Ev_3pics = 262925
    CrDrive_Continuous_Bracket_20Ev_5pics = 262926
    CrDrive_Continuous_Bracket_30Ev_3pics = 262927
    CrDrive_Continuous_Bracket_30Ev_5pics = 262928
    CrDrive_Single_Bracket_03Ev_3pics = 327681
    CrDrive_Single_Bracket_03Ev_5pics = 327682
    CrDrive_Single_Bracket_03Ev_9pics = 327683
    CrDrive_Single_Bracket_05Ev_3pics = 327684
    CrDrive_Single_Bracket_05Ev_5pics = 327685
    CrDrive_Single_Bracket_05Ev_9pics = 327686
    CrDrive_Single_Bracket_07Ev_3pics = 327687
    CrDrive_Single_Bracket_07Ev_5pics = 327688
    CrDrive_Single_Bracket_07Ev_9pics = 327689
    CrDrive_Single_Bracket_10Ev_3pics = 327690
    CrDrive_Single_Bracket_10Ev_5pics = 327691
    CrDrive_Single_Bracket_10Ev_9pics = 327692
    CrDrive_Single_Bracket_20Ev_3pics = 327693
    CrDrive_Single_Bracket_20Ev_5pics = 327694
    CrDrive_Single_Bracket_30Ev_3pics = 327695
    CrDrive_Single_Bracket_30Ev_5pics = 327696
    CrDrive_WB_Bracket_Lo = 393217
    CrDrive_WB_Bracket_Hi = 393218
    CrDrive_DRO_Bracket_Lo = 458753
    CrDrive_DRO_Bracket_Hi = 458754
    CrDrive_Continuous_Timer_3pics = 524289
    CrDrive_Continuous_Timer_5pics = 524290
    CrDrive_Continuous_Timer_2s_3pics = 524291
    CrDrive_Continuous_Timer_2s_5pics = 524292
    CrDrive_Continuous_Timer_5s_3pics = 524293
    CrDrive_Continuous_Timer_5s_5pics = 524294
    CrDrive_LPF_Bracket = 989681
    CrDrive_RemoteCommander = 989682
    CrDrive_MirrorUp = 989683
    CrDrive_SelfPortrait_1 = 989684
    CrDrive_SelfPortrait_2 = 989685

class sonyClassAlpha7StillCaptureOptions(enum.IntEnum):
    CrDrive_Continuous_Mid = 2
    CrDrive_Single = 0
    CrDrive_Continuous_Lo = 1
    CrDrive_Continuous_Hi = 3
    CrDrive_Continuous_Hi_Plus = 4
    CrDrive_Timer_10s = 5
    CrDrive_Timer_5s = 6
    CrDrive_Timer_2s = 7
    CrDrive_Continuous_Timer_3pics = 8
    CrDrive_Continuous_Timer_5pics = 9
    CrDrive_Continuous_Timer_5s_3pics = 10
    CrDrive_Continuous_Timer_5s_5pics = 11
    CrDrive_Continuous_Timer_2s_3pics = 12
    CrDrive_Continuous_Timer_2s_5pics = 13
    CrDrive_Continuous_Bracket_03Ev_3pics = 14
    CrDrive_Continuous_Bracket_03Ev_5pics = 15
    CrDrive_Continuous_Bracket_03Ev_9pics = 16
    CrDrive_Continuous_Bracket_05Ev_3pics = 17
    CrDrive_Continuous_Bracket_05Ev_5pics = 18
    CrDrive_Continuous_Bracket_05Ev_9pics = 19
    CrDrive_Continuous_Bracket_07Ev_3pics = 20
    CrDrive_Continuous_Bracket_07Ev_5pics = 21
    CrDrive_Continuous_Bracket_07Ev_9pics = 22
    CrDrive_Continuous_Bracket_10Ev_3pics = 23
    CrDrive_Continuous_Bracket_10Ev_5pics = 24
    CrDrive_Continuous_Bracket_10Ev_9pics = 25
    CrDrive_Continuous_Bracket_20Ev_3pics = 26
    CrDrive_Continuous_Bracket_20Ev_5pics = 27
    CrDrive_Continuous_Bracket_30Ev_3pics = 28
    CrDrive_Continuous_Bracket_30Ev_5pics = 29
    CrDrive_Single_Bracket_03Ev_3pics = 30
    CrDrive_Single_Bracket_03Ev_5pics = 31
    CrDrive_Single_Bracket_03Ev_9pics = 32
    CrDrive_Single_Bracket_05Ev_3pics = 33
    CrDrive_Single_Bracket_05Ev_5pics = 34
    CrDrive_Single_Bracket_05Ev_9pics = 35
    CrDrive_Single_Bracket_07Ev_3pics = 36
    CrDrive_Single_Bracket_07Ev_5pics = 37
    CrDrive_Single_Bracket_07Ev_9pics = 38
    CrDrive_Single_Bracket_10Ev_3pics = 39
    CrDrive_Single_Bracket_10Ev_5pics = 40
    CrDrive_Single_Bracket_10Ev_9pics = 41
    CrDrive_Single_Bracket_20Ev_3pics = 42
    CrDrive_Single_Bracket_20Ev_5pics = 43
    CrDrive_Single_Bracket_30Ev_3pics = 44
    CrDrive_Single_Bracket_30Ev_5pics = 45
    CrDrive_WB_Bracket_Hi = 46
    CrDrive_WB_Bracket_Lo = 47
    CrDrive_DRO_Bracket_Hi = 48
    CrDrive_DRO_Bracket_Lo = 49

#
# These are examples of other cameras at present set as dummy data
#
# ============================================== EClass Camera ==========================================================================
#
# ================================================ ISO =========================================================================================
#
# consider creating automatically from the .hpp file (its name and enumeration that the API understands)
# This is the manufacture capability of the camera
#
class eClassApertureCpEnums(enum.IntEnum):
    F1_6 = 60
    F3 = 18
    F4 = 389
    F6 = 4080
    F20 = 5080
    F600 = 999

#
# consider grabbing all params from camera and writing these automatically (its what list is pulled out for each camera)
# This is the capability of the model
#
class eClassModel1ApertureOptions(enum.IntEnum):
    F1_6 = 0
    F3 = 1
    F4 = 3
    F6 = 4
    F20 = 5

#
# consider grabbing all params from camera and writing these automatically (its what list is
pulled out for each camera) # This is the capability of the model # class eClassModel2ApertureOptions(enum.IntEnum): F1_6 = 0 F3 = 1 F4 = 3 F6 = 4 F20 = 5 F600 = 6 # ================================================ ISO ========================================================================================= class eClassIsoCpEnums(enum.IntEnum): ISO1 = 80 ISO2 = 100 ISO3 = 300 ISO4 = 400 ISO5 = 500 ISO6 = 600 # # consider grabbing all params from camera and writing these automatically (its what list is pulled out for each camera) # This is the capability of the model # class eClassModel1IsoOptions(enum.IntEnum): ISO2 = 0 ISO3 = 1 ISO5 = 2 ISO6 = 3 # # consider grabbing all params from camera and writing these automatically (its what list is pulled out for each camera) # This is the capability of the model # class eClassModel2IsoOptions(enum.IntEnum): ISO1 = 0 ISO2 = 1 ISO3 = 3 ISO4 = 4 ISO5 = 5 ISO6 = 6 # ============ FClass Camera =============================== # # class fClassApertureCpEnums(enum.IntEnum): F1_6 = 620 F3 = 1438 F4 = 3869 F6 = 4080 F20 = 504680 F60 = 4680 class fClassApertureOptions(enum.IntEnum): F1_6 = 0 F3 = 1 F4 = 2 F6 = 3 F20 = 5 # # each feature set is a list of class names which enumerate the class each way # FEATURE_SET_1 = { 'iso': [ sonyClassIsoCpEnums, sonyClassAlpha7IsoOptions ], 'white_bal': [ sonyClassWhiteBalanceCpEnums, sonyClassAlpha7WhiteBalanceOptions ], 'focus_mode': [ sonyClassFocusModeCpEnums, sonyClassAlpha7FocusModeOptions ], 'focus_area': [ sonyClassFocusAreaCpEnums, sonyClassAlpha7FocusAreaOptions ], 'shutter_speed': [ sonyClassShutterSpeedCpEnums, sonyClassAlpha7ShutterSpeedOptions ], 'still_cap_mode': [ sonyClassStillCaptureCpEnums, sonyClassAlpha7StillCaptureOptions ], 'aperture': [ sonyClassApertureCpEnums, sonyClassAlpha7ApertureOptions ], 'exposure_prog': [ sonyClassExProCpEnums, sonyClassAlpha7ExProOptions ], } # # these are dummy classes for other camera models to be added the dict can store details associated with 
available each mode # CpEnums : denotes what total features are in a type or which an (API) can perform # Options : denotes list of maximum options available for camera model # FEATURE_SET_5 = { 'iso': [ eClassIsoCpEnums, eClassModel1IsoOptions ], 'white_bal': [ eClassApertureCpEnums, eClassModel1ApertureOptions ], 'focus_mode': [ eClassApertureCpEnums, eClassModel1ApertureOptions ], 'focus_area': [ eClassApertureCpEnums, eClassModel1ApertureOptions ], 'shutter_speed': [ eClassApertureCpEnums, eClassModel1ApertureOptions ], 'still_cap_mode': [ eClassApertureCpEnums, eClassModel1ApertureOptions ], 'aperture': [ eClassApertureCpEnums, eClassModel1ApertureOptions ], } FEATURE_SET_2 = { 'exposure': (4, 6), } FEATURE_SET_3 = { 'iso': [ fClassApertureCpEnums, fClassApertureOptions ], 'white_bal': [ fClassApertureCpEnums, fClassApertureOptions ], 'focus_mode': [ fClassApertureCpEnums, fClassApertureOptions ], 'focus_area': [ fClassApertureCpEnums, fClassApertureOptions ], 'shutter_speed': [ fClassApertureCpEnums, fClassApertureOptions ], 'still_cap_mode': [ fClassApertureCpEnums, fClassApertureOptions ], 'aperture': [ fClassApertureCpEnums, fClassApertureOptions ], } FEATURE_SET_4 = { 'iso': [ eClassIsoCpEnums, eClassModel2IsoOptions ], 'white_bal': [ eClassApertureCpEnums, eClassModel2ApertureOptions ], 'focus_mode': [ eClassApertureCpEnums, eClassModel2ApertureOptions ], 'focus_area': [ eClassApertureCpEnums, eClassModel2ApertureOptions ], 'shutter_speed': [ eClassApertureCpEnums, eClassModel2ApertureOptions ], 'still_cap_mode': [ eClassApertureCpEnums, eClassModel2ApertureOptions ], 'aperture': [ eClassApertureCpEnums, eClassModel2ApertureOptions ], } # links the feature sets to the model this is what the camera can do and how it understands this data # CAMERA_FEATURE_DATA = { 'Alpha7': FEATURE_SET_5, 'RedEye': FEATURE_SET_2, 'Alpha6': FEATURE_SET_5, 'Alpha5': FEATURE_SET_3, 'Alpha9': FEATURE_SET_4, 'SonyAlfa7': FEATURE_SET_1, 'SonyAlfa7LHS': FEATURE_SET_1, 
'SonyAlfa7RHS': FEATURE_SET_1, } # lists the model names with a type id (which used to branch in the main code) # CAMERA_MODELS_DATA = { 'SonyAlfa7': 1, 'RedEye': 2, 'Alpha6': 3, 'Alpha5': 4, 'Alpha9': 5, 'Alpha7': 6, 'SonyAlfa7LHS': 7, 'SonyAlfa7RHS': 8, } import re class sonyAlphaNewCamera(): # number from name # def match_name_enum( self, nameS,eClass ): #pattern = re.compile(nameS) for s in sorted(eClass): #if not s.name.find(nameS) == -1: if not (re.fullmatch(nameS, s.name)==None): #print(f" value {s.value} {s.value}") return s.value return None # name from number # def match_num_enum( self, nameV,eClass ): for s in sorted(eClass): if (s.value == nameV): #print(f" name {s.name} {s.value}") return s.name return None # option from enum # def classGetOptionFromCpEnum( self, cpEnumNum, camClassE, camClassO ): name=self.match_num_enum( cpEnumNum,camClassE ) if name is not None: return(self.match_name_enum( name, camClassO )) # enum from option # def classGetCpEnumFromOption( self, oPtion,camClassE, camClassO ): name=self.match_num_enum( oPtion,camClassE ) if name is not None: return(self.match_name_enum( name, camClassO )) # returns relevant data set and id number for a given camera name # # def getDataForModel(self, my_model): # python2 : for model_name, model_id in CAMERA_MODELS_DATA.iteritems(): #pattern = re.compile(my_model) for model_name, model_id in CAMERA_MODELS_DATA.items(): if not model_name == None: #if not model_name.find(my_model) == -1: if not (re.fullmatch(my_model, model_name)==None): my_data_set = CAMERA_FEATURE_DATA[model_name] return my_data_set,model_id return None # returns the camera name selected from the GUI # # def getModelForId(self, id): for model_name, model_id in CAMERA_MODELS_DATA.items(): if not model_name == None: if (model_id == id): my_data_set = CAMERA_FEATURE_DATA[model_name] return my_data_set,model_name return None # gets option # def getOptionFromEnum( self, enum, data_set, option ): list_of_enums = data_set[option] if 
len(list_of_enums) == 2: return( self.classGetOptionFromCpEnum( enum, list_of_enums[0], list_of_enums[1] ) ) return None # gets enum # def getEnumFromOption( self, opt, data_set, option ): list_of_enums = data_set[option] if len(list_of_enums) == 2: return( self.classGetCpEnumFromOption( opt, list_of_enums[1], list_of_enums[0] )) return None if __name__ == '__main__': droneCam = sonyAlphaNewCamera() cam1_data, cam1_id = droneCam.getDataForModel('Alpha7') print(f" a7:: aperture option for 5080 is {droneCam.getOptionFromEnum( 5080, cam1_data, "aperture" )}") print(f" a7:: aperture enum for 3 is {droneCam.getEnumFromOption( 3, cam1_data, "aperture" )}") print(f" a7:: aperture option for 4680 is {droneCam.getOptionFromEnum( 4680, cam1_data, "aperture" )}") print(f" a7:: aperture enum for 2 is {droneCam.getEnumFromOption( 2, cam1_data, "aperture" )}") print(f" a7:: aperture option for 999 is {droneCam.getOptionFromEnum( 999, cam1_data, "aperture" )}") print(f" a7:: iso enum for 2 is {droneCam.getEnumFromOption( 2, cam1_data, "iso" )}") print(f" a7:: iso option for 600 is {droneCam.getOptionFromEnum( 600, cam1_data, "iso" )}") cam1_data, cam1_id = droneCam.getDataForModel('Alpha9') print(f" a9:: aperture option for 999 is {droneCam.getOptionFromEnum( 999, cam1_data, "aperture" )}") print(f" a9:: iso enum for 2 is {droneCam.getEnumFromOption( 2, cam1_data, "iso" )}") print(f" a9:: iso option for 600 is {droneCam.getOptionFromEnum( 600, cam1_data, "iso" )}") # ------------------------------- test every mode for alpha 7 sony camera ---------------------------------------- # cam1_data, cam1_id = droneCam.getDataForModel('SonyAlfa7') print(f" sonyAlfa7:: whitebalance enum for 2 is {droneCam.getEnumFromOption( 2, cam1_data, "white_bal" )}") print(f" sonyAlfa7:: whitebalance option for 256 is {droneCam.getOptionFromEnum( 256, cam1_data, "white_bal" )}") print(f" sonyAlfa7:: whitebalance option for 260 is {droneCam.getOptionFromEnum( 260, cam1_data, "white_bal" )}") print(f" 
sonyAlfa7:: iso enum for 2 is {droneCam.getEnumFromOption( 2, cam1_data, "iso" )}") print(f" sonyAlfa7:: iso option for 800 is {droneCam.getOptionFromEnum( 800, cam1_data, "iso" )}") print(f" sonyAlfa7:: focus mode enum for 2 is {droneCam.getEnumFromOption( 2, cam1_data, "focus_mode" )}") print(f" sonyAlfa7:: focus mode option for 5 is {droneCam.getOptionFromEnum( 5, cam1_data, "focus_mode" )}") print(f" sonyAlfa7:: focus_area enum for 2 is {droneCam.getEnumFromOption( 2, cam1_data, "focus_area" )}") print(f" sonyAlfa7:: focus_area option for 6 is {droneCam.getOptionFromEnum( 6, cam1_data, "focus_area" )}") print(f" sonyAlfa7:: shutter_speed enum for 2 is {droneCam.getEnumFromOption( 2, cam1_data, "shutter_speed" )}") print(f" sonyAlfa7:: shutter_speed option for 66786 is {droneCam.getOptionFromEnum( 66786, cam1_data, "shutter_speed" )}") print(f" sonyAlfa7:: still_cap_mode enum for 2 is {droneCam.getEnumFromOption( 2, cam1_data, "still_cap_mode" )}") print(f" sonyAlfa7:: still_cap_mode option for 65541 is {droneCam.getOptionFromEnum( 65541, cam1_data, "still_cap_mode" )}") print(f" sonyAlfa7:: still_cap_mode option for 65540 is {droneCam.getOptionFromEnum( 65540, cam1_data, "still_cap_mode" )}") print(f" sonyAlfa7:: aperture enum for 2 is {droneCam.getEnumFromOption( 2, cam1_data, "aperture" )}") print(f" sonyAlfa7:: aperture option for 1300 is {droneCam.getOptionFromEnum( 1300, cam1_data, "aperture" )}")
#
# Enumeration of various types of sony camera: options (maximum all-mode list of
# features available on a model) and cp_enumeration (the number assigned to the
# feature in the API and GUI).
#
import enum
import re

# ---- each camera has a set of enumerations for each functionality and its available options ----

# =========================== White Balance ===========================
class sonyClassWhiteBalanceCpEnums(enum.IntEnum):
    """White-balance modes as numbered by the camera API."""
    CrWhiteBalance_AWB = 0
    CrWhiteBalance_Underwater_Auto = 1
    CrWhiteBalance_Daylight = 17
    CrWhiteBalance_Shadow = 18
    CrWhiteBalance_Cloudy = 19
    CrWhiteBalance_Tungsten = 20
    CrWhiteBalance_Fluorescent = 32
    CrWhiteBalance_Fluorescent_WarmWhite = 33
    CrWhiteBalance_Fluorescent_CoolWhite = 34
    CrWhiteBalance_Fluorescent_DayWhite = 35
    CrWhiteBalance_Fluorescent_Daylight = 36
    CrWhiteBalance_Flush = 48
    CrWhiteBalance_ColorTemp = 256
    CrWhiteBalance_Custom_1 = 257
    CrWhiteBalance_Custom_2 = 258
    CrWhiteBalance_Custom_3 = 259
    CrWhiteBalance_Custom = 260


class sonyClassAlpha7WhiteBalanceOptions(enum.IntEnum):
    """White-balance modes the Alpha7 model exposes, as GUI option indexes."""
    CrWhiteBalance_AWB = 0
    CrWhiteBalance_Underwater_Auto = 10
    CrWhiteBalance_Daylight = 1
    CrWhiteBalance_Shadow = 2
    CrWhiteBalance_Cloudy = 3
    CrWhiteBalance_Tungsten = 4
    CrWhiteBalance_Fluorescent_WarmWhite = 5
    CrWhiteBalance_Fluorescent_CoolWhite = 6
    CrWhiteBalance_Fluorescent_DayWhite = 7
    CrWhiteBalance_Fluorescent_Daylight = 8
    CrWhiteBalance_Flush = 9
    CrWhiteBalance_ColorTemp = 11
    CrWhiteBalance_Custom_1 = 12
    CrWhiteBalance_Custom_2 = 13
    CrWhiteBalance_Custom_3 = 14
    # test unit only
    # CrWhiteBalance_Custom = 15


# =========================== Focus Mode ===========================
class sonyClassFocusModeCpEnums(enum.IntEnum):
    CrFocus_MF = 1
    CrFocus_AF_S = 2
    CrFocus_AF_C = 3
    CrFocus_AF_A = 4
    CrFocus_AF_D = 5
    CrFocus_DMF = 6
    CrFocus_PF = 7


class sonyClassAlpha7FocusModeOptions(enum.IntEnum):
    CrFocus_MF = 4
    CrFocus_AF_S = 0
    CrFocus_AF_C = 2
    CrFocus_AF_A = 1
    CrFocus_AF_D = 5
    CrFocus_DMF = 3


# =========================== Aperture ===========================
class sonyClassApertureCpEnums(enum.IntEnum):
    """Aperture f-numbers; API value is the f-number x 100."""
    F2_5 = 250
    F2_8 = 280
    F3_2 = 320
    F3_5 = 350
    F4_0 = 400
    F4_5 = 450
    F5_0 = 500
    F5_6 = 560
    F6_3 = 630
    F7_1 = 710
    F8_0 = 800
    F9_0 = 900
    F10 = 1000
    F11 = 1100
    F13 = 1300
    F14 = 1400
    F16 = 1600
    F18 = 1800
    F20 = 2000
    F22 = 2200


class sonyClassAlpha7ApertureOptions(enum.IntEnum):
    F2_5 = 0
    F2_8 = 1
    F3_2 = 2
    F3_5 = 3
    F4_0 = 4
    F4_5 = 5
    F5_0 = 6
    F5_6 = 7
    F6_3 = 8
    F7_1 = 9
    F8_0 = 10
    F9_0 = 11
    F10 = 12
    F11 = 13
    F13 = 14
    F14 = 15
    F16 = 16
    F18 = 17
    F20 = 18
    F22 = 19


# =========================== Iso ===========================
class sonyClassIsoCpEnums(enum.IntEnum):
    """ISO sensitivities; API value equals the ISO number."""
    ISO0 = 0
    ISO50 = 50
    ISO64 = 64
    ISO80 = 80
    ISO100 = 100
    ISO125 = 125
    ISO160 = 160
    ISO200 = 200
    ISO250 = 250
    ISO320 = 320
    ISO400 = 400
    ISO500 = 500
    ISO640 = 640
    ISO800 = 800
    ISO1000 = 1000
    ISO1250 = 1250
    ISO1600 = 1600
    ISO2000 = 2000
    ISO2500 = 2500
    ISO3200 = 3200
    ISO4000 = 4000
    ISO5000 = 5000
    ISO6400 = 6400
    ISO8000 = 8000
    ISO10000 = 10000
    ISO12800 = 12800
    ISO16000 = 16000
    ISO20000 = 20000
    ISO25600 = 25600
    ISO32000 = 32000
    ISO40000 = 40000
    ISO51200 = 51200
    ISO64000 = 64000
    ISO80000 = 80000
    ISO102400 = 102400


class sonyClassAlpha7IsoOptions(enum.IntEnum):
    ISO0 = 0
    ISO50 = 1
    ISO64 = 2
    ISO80 = 3
    ISO100 = 4
    ISO125 = 5
    ISO160 = 6
    ISO200 = 7
    ISO250 = 8
    ISO320 = 9
    ISO400 = 10
    ISO500 = 11
    ISO640 = 12
    ISO800 = 13
    ISO1000 = 14
    ISO1250 = 15
    ISO1600 = 16
    ISO2000 = 17
    ISO2500 = 18
    ISO3200 = 19
    ISO4000 = 20
    ISO5000 = 21
    ISO6400 = 22
    ISO8000 = 23
    ISO10000 = 24
    ISO12800 = 25
    ISO16000 = 26
    ISO20000 = 27
    ISO25600 = 28
    ISO32000 = 29
    ISO40000 = 30
    ISO51200 = 31
    ISO64000 = 32
    ISO80000 = 33
    ISO102400 = 34


# =========================== Focus Area ===========================
class sonyClassFocusAreaCpEnums(enum.IntEnum):
    CrFocusArea_Unknown = 0
    CrFocusArea_Wide = 1
    CrFocusArea_Zone = 2
    CrFocusArea_Center = 3
    CrFocusArea_Flexible_Spot_S = 4
    CrFocusArea_Flexible_Spot_M = 5
    CrFocusArea_Flexible_Spot_L = 6
    CrFocusArea_Expand_Flexible_Spot = 7
    CrFocusArea_Flexible_Spot = 8
    CrFocusArea_Tracking_Wide = 17
    CrFocusArea_Tracking_Zone = 18
    CrFocusArea_Tracking_Center = 19
    CrFocusArea_Tracking_Flexible_Spot_S = 20
    CrFocusArea_Tracking_Flexible_Spot_M = 21
    CrFocusArea_Tracking_Flexible_Spot_L = 22
    CrFocusArea_Tracking_Expand_Flexible_Spot = 23
    CrFocusArea_Tracking_Flexible_Spot = 24


class sonyClassAlpha7FocusAreaOptions(enum.IntEnum):
    CrFocusArea_Wide = 0
    CrFocusArea_Zone = 1
    CrFocusArea_Center = 2
    CrFocusArea_Flexible_Spot_S = 3
    CrFocusArea_Flexible_Spot_M = 4
    CrFocusArea_Flexible_Spot_L = 5
    CrFocusArea_Expand_Flexible_Spot = 6
    CrFocusArea_Flexible_Spot = 7


# =========================== Exposure Program ===========================
class sonyClassExProCpEnums(enum.IntEnum):
    CrExposure_M_Manual = 1
    CrExposure_P_Auto = 2
    CrExposure_A_AperturePriority = 3
    CrExposure_S_ShutterSpeedPriority = 4
    CrExposure_Program_Creative = 5
    CrExposure_Program_Action = 6
    CrExposure_Portrait = 7
    CrExposure_Auto = 32768
    CrExposure_Auto_Plus = 32769
    CrExposure_P_A = 32770
    CrExposure_P_S = 32771
    CrExposure_Sports_Action = 32772
    CrExposure_Sprots_Action = 32773
    CrExposure_Sunset = 32774
    CrExposure_Night = 32775
    CrExposure_Landscape = 32776
    CrExposure_Macro = 32777
    CrExposure_HandheldTwilight = 32778
    CrExposure_NightPortrait = 32779
    CrExposure_AntiMotionBlur = 32780
    CrExposure_Pet = 32781
    CrExposure_Gourmet = 32782
    CrExposure_Fireworks = 32783
    CrExposure_HighSensitivity = 32784
    CrExposure_MemoryRecall = 32800
    CrExposure_ContinuousPriority_AE_8pics = 32817
    CrExposure_ContinuousPriority_AE_10pics = 32818
    CrExposure_ContinuousPriority_AE_12pics = 32819
    CrExposure_3D_SweepPanorama = 32832
    CrExposure_SweepPanorama = 32833
    CrExposure_Movie_P = 32848
    CrExposure_Movie_A = 32849
    CrExposure_Movie_S = 32850
    CrExposure_Movie_M = 32851
    CrExposure_Movie_Auto = 32852
    CrExposure_Movie_SQMotion_P = 32857
    CrExposure_Movie_SQMotion_A = 32858
    CrExposure_Movie_SQMotion_S = 32859
    CrExposure_Movie_SQMotion_M = 32860
    CrExposure_Flash_Off = 32864
    CrExposure_PictureEffect = 32880
    CrExposure_HiFrameRate_P = 32896
    CrExposure_HiFrameRate_A = 32897
    CrExposure_HiFrameRate_S = 32898
    CrExposure_HiFrameRate_M = 32899
    CrExposure_SQMotion_P = 32900
    CrExposure_SQMotion_A = 32901
    CrExposure_SQMotion_S = 32902
    CrExposure_SQMotion_M = 32903
    CrExposure_MOVIE = 32904
    CrExposure_STILL = 32905


class sonyClassAlpha7ExProOptions(enum.IntEnum):
    CrExposure_Movie_S = 2
    CrExposure_Movie_P = 0
    CrExposure_Movie_A = 1
    CrExposure_Movie_M = 3


# =========================== Shutter Speed ===========================
class sonyClassShutterSpeedCpEnums(enum.IntEnum):
    """Shutter speeds; sN = N seconds, s1dN = 1/N second, as API numbers."""
    s0 = 0
    s30 = 19660810
    s25 = 16384010
    s20 = 13107210
    s15 = 9830410
    s13 = 8519690
    s10 = 6553610
    s8 = 5242890
    s6 = 3932170
    s5 = 3276810
    s4 = 2621450
    s3_2 = 2097162
    s2_5 = 1638410
    s2 = 1310730
    s1_6 = 1048586
    s1_3 = 851978
    s1 = 655370
    s0_8 = 524298
    s0_6 = 393226
    s0_5 = 327690
    s0_4 = 262154
    s1d3 = 65539
    s1d4 = 65540
    s1d5 = 65541
    s1d6 = 65542
    s1d8 = 65544
    s1d10 = 65546
    s1d13 = 65549
    s1d15 = 65551
    s1d20 = 65556
    s1d25 = 65561
    s1d30 = 65566
    s1d40 = 65576
    s1d50 = 65586
    s1d60 = 65596
    s1d80 = 65616
    s1d100 = 65636
    s1d125 = 65661
    s1d160 = 65696
    s1d200 = 65736
    s1d250 = 65786
    s1d320 = 65856
    s1d400 = 65936
    s1d500 = 66036
    s1d640 = 66176
    s1d800 = 66336
    s1d1000 = 66536
    s1d1250 = 66786
    s1d1600 = 67136
    s1d2000 = 67536
    s1d2500 = 68036
    s1d3200 = 68736
    s1d4000 = 69536
    s1d5000 = 70536
    s1d6400 = 71936
    s1d8000 = 73536


class sonyClassAlpha7ShutterSpeedOptions(enum.IntEnum):
    s0 = 0
    s30 = 1
    s25 = 2
    s20 = 3
    s15 = 4
    s13 = 5
    s10 = 6
    s8 = 7
    s6 = 8
    s5 = 9
    s4 = 10
    s3_2 = 11
    s2_5 = 12
    s2 = 13
    s1_6 = 14
    s1_3 = 15
    s1 = 16
    s0_8 = 17
    s0_6 = 18
    s0_5 = 19
    s0_4 = 20
    s1d3 = 21
    s1d4 = 22
    s1d5 = 23
    s1d6 = 24
    s1d8 = 25
    s1d10 = 26
    s1d13 = 27
    s1d15 = 28
    s1d20 = 29
    s1d25 = 30
    s1d30 = 31
    s1d40 = 32
    s1d50 = 33
    s1d60 = 34
    s1d80 = 35
    s1d100 = 36
    s1d125 = 37
    s1d160 = 38
    s1d200 = 39
    s1d250 = 40
    s1d320 = 41
    s1d400 = 42
    s1d500 = 43
    s1d640 = 44
    s1d800 = 45
    s1d1000 = 46
    s1d1250 = 47
    s1d1600 = 48
    s1d2000 = 49
    s1d2500 = 50
    s1d3200 = 51
    s1d4000 = 52
    s1d5000 = 53
    s1d6400 = 54
    s1d8000 = 55


# =========================== Still Capture mode ===========================
class sonyClassStillCaptureCpEnums(enum.IntEnum):
    """Drive / still-capture modes as numbered by the camera API."""
    CrDrive_Single = 1
    CrDrive_Continuous_Hi = 65537
    CrDrive_Continuous_Hi_Plus = 65538
    CrDrive_Continuous_Hi_Live = 65539
    CrDrive_Continuous_Lo = 65540
    CrDrive_Continuous = 65541
    CrDrive_Continuous_SpeedPriority = 65542
    CrDrive_Continuous_Mid = 65543
    CrDrive_Continuous_Mid_Live = 65544
    CrDrive_Continuous_Lo_Live = 65545
    CrDrive_SingleBurstShooting_lo = 69633
    CrDrive_SingleBurstShooting_mid = 69644
    CrDrive_SingleBurstShooting_hi = 69645
    CrDrive_Timelapse = 131073
    CrDrive_Timer_2s = 196609
    CrDrive_Timer_5s = 196610
    CrDrive_Timer_10s = 196611
    CrDrive_Continuous_Bracket_03Ev_3pics = 262913
    CrDrive_Continuous_Bracket_03Ev_5pics = 262914
    CrDrive_Continuous_Bracket_03Ev_9pics = 262915
    CrDrive_Continuous_Bracket_05Ev_3pics = 262916
    CrDrive_Continuous_Bracket_05Ev_5pics = 262917
    CrDrive_Continuous_Bracket_05Ev_9pics = 262918
    CrDrive_Continuous_Bracket_07Ev_3pics = 262919
    CrDrive_Continuous_Bracket_07Ev_5pics = 262920
    CrDrive_Continuous_Bracket_07Ev_9pics = 262921
    CrDrive_Continuous_Bracket_10Ev_3pics = 262922
    CrDrive_Continuous_Bracket_10Ev_5pics = 262923
    CrDrive_Continuous_Bracket_10Ev_9pics = 262924
    CrDrive_Continuous_Bracket_20Ev_3pics = 262925
    CrDrive_Continuous_Bracket_20Ev_5pics = 262926
    CrDrive_Continuous_Bracket_30Ev_3pics = 262927
    CrDrive_Continuous_Bracket_30Ev_5pics = 262928
    CrDrive_Single_Bracket_03Ev_3pics = 327681
    CrDrive_Single_Bracket_03Ev_5pics = 327682
CrDrive_Single_Bracket_03Ev_9pics = 327683 CrDrive_Single_Bracket_05Ev_3pics = 327684 CrDrive_Single_Bracket_05Ev_5pics = 327685 CrDrive_Single_Bracket_05Ev_9pics = 327686 CrDrive_Single_Bracket_07Ev_3pics = 327687 CrDrive_Single_Bracket_07Ev_5pics = 327688 CrDrive_Single_Bracket_07Ev_9pics = 327689 CrDrive_Single_Bracket_10Ev_3pics = 327690 CrDrive_Single_Bracket_10Ev_5pics = 327691 CrDrive_Single_Bracket_10Ev_9pics = 327692 CrDrive_Single_Bracket_20Ev_3pics = 327693 CrDrive_Single_Bracket_20Ev_5pics = 327694 CrDrive_Single_Bracket_30Ev_3pics = 327695 CrDrive_Single_Bracket_30Ev_5pics = 327696 CrDrive_WB_Bracket_Lo = 393217 CrDrive_WB_Bracket_Hi = 393218 CrDrive_DRO_Bracket_Lo = 458753 CrDrive_DRO_Bracket_Hi = 458754 CrDrive_Continuous_Timer_3pics = 524289 CrDrive_Continuous_Timer_5pics = 524290 CrDrive_Continuous_Timer_2s_3pics = 524291 CrDrive_Continuous_Timer_2s_5pics = 524292 CrDrive_Continuous_Timer_5s_3pics = 524293 CrDrive_Continuous_Timer_5s_5pics = 524294 CrDrive_LPF_Bracket = 989681 CrDrive_RemoteCommander = 989682 CrDrive_MirrorUp = 989683 CrDrive_SelfPortrait_1 = 989684 CrDrive_SelfPortrait_2 = 989685 class sonyClassAlpha7StillCaptureOptions(enum.IntEnum): CrDrive_Continuous_Mid = 2 CrDrive_Single = 0 CrDrive_Continuous_Lo = 1 CrDrive_Continuous_Hi = 3 CrDrive_Continuous_Hi_Plus = 4 CrDrive_Timer_10s = 5 CrDrive_Timer_5s = 6 CrDrive_Timer_2s = 7 CrDrive_Continuous_Timer_3pics = 8 CrDrive_Continuous_Timer_5pics = 9 CrDrive_Continuous_Timer_5s_3pics = 10 CrDrive_Continuous_Timer_5s_5pics = 11 CrDrive_Continuous_Timer_2s_3pics = 12 CrDrive_Continuous_Timer_2s_5pics = 13 CrDrive_Continuous_Bracket_03Ev_3pics = 14 CrDrive_Continuous_Bracket_03Ev_5pics = 15 CrDrive_Continuous_Bracket_03Ev_9pics = 16 CrDrive_Continuous_Bracket_05Ev_3pics = 17 CrDrive_Continuous_Bracket_05Ev_5pics = 18 CrDrive_Continuous_Bracket_05Ev_9pics = 19 CrDrive_Continuous_Bracket_07Ev_3pics = 20 CrDrive_Continuous_Bracket_07Ev_5pics = 21 CrDrive_Continuous_Bracket_07Ev_9pics = 22 
CrDrive_Continuous_Bracket_10Ev_3pics = 23 CrDrive_Continuous_Bracket_10Ev_5pics = 24 CrDrive_Continuous_Bracket_10Ev_9pics = 25 CrDrive_Continuous_Bracket_20Ev_3pics = 26 CrDrive_Continuous_Bracket_20Ev_5pics = 27 CrDrive_Continuous_Bracket_30Ev_3pics = 28 CrDrive_Continuous_Bracket_30Ev_5pics = 29 CrDrive_Single_Bracket_03Ev_3pics = 30 CrDrive_Single_Bracket_03Ev_5pics = 31 CrDrive_Single_Bracket_03Ev_9pics = 32 CrDrive_Single_Bracket_05Ev_3pics = 33 CrDrive_Single_Bracket_05Ev_5pics = 34 CrDrive_Single_Bracket_05Ev_9pics = 35 CrDrive_Single_Bracket_07Ev_3pics = 36 CrDrive_Single_Bracket_07Ev_5pics = 37 CrDrive_Single_Bracket_07Ev_9pics = 38 CrDrive_Single_Bracket_10Ev_3pics = 39 CrDrive_Single_Bracket_10Ev_5pics = 40 CrDrive_Single_Bracket_10Ev_9pics = 41 CrDrive_Single_Bracket_20Ev_3pics = 42 CrDrive_Single_Bracket_20Ev_5pics = 43 CrDrive_Single_Bracket_30Ev_3pics = 44 CrDrive_Single_Bracket_30Ev_5pics = 45 CrDrive_WB_Bracket_Hi = 46 CrDrive_WB_Bracket_Lo = 47 CrDrive_DRO_Bracket_Hi = 48 CrDrive_DRO_Bracket_Lo = 49 # # These are examples of other cameras at present set as dummy data # # ============================================== EClass Camera ========================================================================== # # ================================================ ISO ========================================================================================= # # # consider creating automatically from the .hpp file (its name and enumeration that the API understands) # This is the manufacture capability of the camera # class eClassApertureCpEnums(enum.IntEnum): F1_6 = 60 F3 = 18 F4 = 389 F6 = 4080 F20 = 5080 F600 = 999 # # consider grabbing all params from camera and writing these automatically (its what list is pulled out for each camera) # This is the capability of the model # class eClassModel1ApertureOptions(enum.IntEnum): F1_6 = 0 F3 = 1 F4 = 3 F6 = 4 F20 = 5 # # consider grabbing all params from camera and writing these automatically (its what list is 
pulled out for each camera) # This is the capability of the model # class eClassModel2ApertureOptions(enum.IntEnum): F1_6 = 0 F3 = 1 F4 = 3 F6 = 4 F20 = 5 F600 = 6 # ================================================ ISO ========================================================================================= class eClassIsoCpEnums(enum.IntEnum): ISO1 = 80 ISO2 = 100 ISO3 = 300 ISO4 = 400 ISO5 = 500 ISO6 = 600 # # consider grabbing all params from camera and writing these automatically (its what list is pulled out for each camera) # This is the capability of the model # class eClassModel1IsoOptions(enum.IntEnum): ISO2 = 0 ISO3 = 1 ISO5 = 2 ISO6 = 3 # # consider grabbing all params from camera and writing these automatically (its what list is pulled out for each camera) # This is the capability of the model # class eClassModel2IsoOptions(enum.IntEnum): ISO1 = 0 ISO2 = 1 ISO3 = 3 ISO4 = 4 ISO5 = 5 ISO6 = 6 # ============ FClass Camera =============================== # # class fClassApertureCpEnums(enum.IntEnum): F1_6 = 620 F3 = 1438 F4 = 3869 F6 = 4080 F20 = 504680 F60 = 4680 class fClassApertureOptions(enum.IntEnum): F1_6 = 0 F3 = 1 F4 = 2 F6 = 3 F20 = 5 # # each feature set is a list of class names which enumerate the class each way # FEATURE_SET_1 = { 'iso': [ sonyClassIsoCpEnums, sonyClassAlpha7IsoOptions ], 'white_bal': [ sonyClassWhiteBalanceCpEnums, sonyClassAlpha7WhiteBalanceOptions ], 'focus_mode': [ sonyClassFocusModeCpEnums, sonyClassAlpha7FocusModeOptions ], 'focus_area': [ sonyClassFocusAreaCpEnums, sonyClassAlpha7FocusAreaOptions ], 'shutter_speed': [ sonyClassShutterSpeedCpEnums, sonyClassAlpha7ShutterSpeedOptions ], 'still_cap_mode': [ sonyClassStillCaptureCpEnums, sonyClassAlpha7StillCaptureOptions ], 'aperture': [ sonyClassApertureCpEnums, sonyClassAlpha7ApertureOptions ], 'exposure_prog': [ sonyClassExProCpEnums, sonyClassAlpha7ExProOptions ], } # # these are dummy classes for other camera models to be added the dict can store details associated with 
available each mode # CpEnums : denotes what total features are in a type or which an (API) can perform # Options : denotes list of maximum options available for camera model # FEATURE_SET_5 = { 'iso': [ eClassIsoCpEnums, eClassModel1IsoOptions ], 'white_bal': [ eClassApertureCpEnums, eClassModel1ApertureOptions ], 'focus_mode': [ eClassApertureCpEnums, eClassModel1ApertureOptions ], 'focus_area': [ eClassApertureCpEnums, eClassModel1ApertureOptions ], 'shutter_speed': [ eClassApertureCpEnums, eClassModel1ApertureOptions ], 'still_cap_mode': [ eClassApertureCpEnums, eClassModel1ApertureOptions ], 'aperture': [ eClassApertureCpEnums, eClassModel1ApertureOptions ], } FEATURE_SET_2 = { 'exposure': (4, 6), } FEATURE_SET_3 = { 'iso': [ fClassApertureCpEnums, fClassApertureOptions ], 'white_bal': [ fClassApertureCpEnums, fClassApertureOptions ], 'focus_mode': [ fClassApertureCpEnums, fClassApertureOptions ], 'focus_area': [ fClassApertureCpEnums, fClassApertureOptions ], 'shutter_speed': [ fClassApertureCpEnums, fClassApertureOptions ], 'still_cap_mode': [ fClassApertureCpEnums, fClassApertureOptions ], 'aperture': [ fClassApertureCpEnums, fClassApertureOptions ], } FEATURE_SET_4 = { 'iso': [ eClassIsoCpEnums, eClassModel2IsoOptions ], 'white_bal': [ eClassApertureCpEnums, eClassModel2ApertureOptions ], 'focus_mode': [ eClassApertureCpEnums, eClassModel2ApertureOptions ], 'focus_area': [ eClassApertureCpEnums, eClassModel2ApertureOptions ], 'shutter_speed': [ eClassApertureCpEnums, eClassModel2ApertureOptions ], 'still_cap_mode': [ eClassApertureCpEnums, eClassModel2ApertureOptions ], 'aperture': [ eClassApertureCpEnums, eClassModel2ApertureOptions ], } # links the feature sets to the model this is what the camera can do and how it understands this data # CAMERA_FEATURE_DATA = { 'Alpha7': FEATURE_SET_5, 'RedEye': FEATURE_SET_2, 'Alpha6': FEATURE_SET_5, 'Alpha5': FEATURE_SET_3, 'Alpha9': FEATURE_SET_4, 'SonyAlfa7': FEATURE_SET_1, 'SonyAlfa7LHS': FEATURE_SET_1, 
'SonyAlfa7RHS': FEATURE_SET_1, } # lists the model names with a type id (which used to branch in the main code) # CAMERA_MODELS_DATA = { 'SonyAlfa7': 1, 'RedEye': 2, 'Alpha6': 3, 'Alpha5': 4, 'Alpha9': 5, 'Alpha7': 6, 'SonyAlfa7LHS': 7, 'SonyAlfa7RHS': 8, } import re class sonyAlphaNewCamera(): # number from name # def match_name_enum( self, nameS,eClass ): #pattern = re.compile(nameS) for s in sorted(eClass): #if not s.name.find(nameS) == -1: if not (re.fullmatch(nameS, s.name)==None): #print(f" value {s.value} {s.value}") return s.value return None # name from number # def match_num_enum( self, nameV,eClass ): for s in sorted(eClass): if (s.value == nameV): #print(f" name {s.name} {s.value}") return s.name return None # option from enum # def classGetOptionFromCpEnum( self, cpEnumNum, camClassE, camClassO ): name=self.match_num_enum( cpEnumNum,camClassE ) if name is not None: return(self.match_name_enum( name, camClassO )) # enum from option # def classGetCpEnumFromOption( self, oPtion,camClassE, camClassO ): name=self.match_num_enum( oPtion,camClassE ) if name is not None: return(self.match_name_enum( name, camClassO )) # returns relevant data set and id number for a given camera name # # def getDataForModel(self, my_model): # python2 : for model_name, model_id in CAMERA_MODELS_DATA.iteritems(): #pattern = re.compile(my_model) for model_name, model_id in CAMERA_MODELS_DATA.items(): if not model_name == None: #if not model_name.find(my_model) == -1: if not (re.fullmatch(my_model, model_name)==None): my_data_set = CAMERA_FEATURE_DATA[model_name] return my_data_set,model_id return None # returns the camera name selected from the GUI # # def getModelForId(self, id): for model_name, model_id in CAMERA_MODELS_DATA.items(): if not model_name == None: if (model_id == id): my_data_set = CAMERA_FEATURE_DATA[model_name] return my_data_set,model_name return None # gets option # def getOptionFromEnum( self, enum, data_set, option ): list_of_enums = data_set[option] if 
len(list_of_enums) == 2: return( self.classGetOptionFromCpEnum( enum, list_of_enums[0], list_of_enums[1] ) ) return None # gets enum # def getEnumFromOption( self, opt, data_set, option ): list_of_enums = data_set[option] if len(list_of_enums) == 2: return( self.classGetCpEnumFromOption( opt, list_of_enums[1], list_of_enums[0] )) return None if __name__ == '__main__': droneCam = sonyAlphaNewCamera() cam1_data, cam1_id = droneCam.getDataForModel('Alpha7') print(f" a7:: aperture option for 5080 is {droneCam.getOptionFromEnum( 5080, cam1_data, 'aperture' )}") print(f" a7:: aperture enum for 3 is {droneCam.getEnumFromOption( 3, cam1_data, 'aperture' )}") print(f" a7:: aperture option for 4680 is {droneCam.getOptionFromEnum( 4680, cam1_data, 'aperture' )}") print(f" a7:: aperture enum for 2 is {droneCam.getEnumFromOption( 2, cam1_data, 'aperture' )}") print(f" a7:: aperture option for 999 is {droneCam.getOptionFromEnum( 999, cam1_data, 'aperture' )}") print(f" a7:: iso enum for 2 is {droneCam.getEnumFromOption( 2, cam1_data, 'iso' )}") print(f" a7:: iso option for 600 is {droneCam.getOptionFromEnum( 600, cam1_data, 'iso' )}") cam1_data, cam1_id = droneCam.getDataForModel('Alpha9') print(f" a9:: aperture option for 999 is {droneCam.getOptionFromEnum( 999, cam1_data, 'aperture' )}") print(f" a9:: iso enum for 2 is {droneCam.getEnumFromOption( 2, cam1_data, 'iso' )}") print(f" a9:: iso option for 600 is {droneCam.getOptionFromEnum( 600, cam1_data, 'iso' )}") # ------------------------------- test every mode for alpha 7 sony camera ---------------------------------------- # cam1_data, cam1_id = droneCam.getDataForModel('SonyAlfa7') print(f" sonyAlfa7:: whitebalance enum for 2 is {droneCam.getEnumFromOption( 2, cam1_data, 'white_bal' )}") print(f" sonyAlfa7:: whitebalance option for 256 is {droneCam.getOptionFromEnum( 256, cam1_data, 'white_bal' )}") print(f" sonyAlfa7:: whitebalance option for 260 is {droneCam.getOptionFromEnum( 260, cam1_data, 'white_bal' )}") print(f" 
sonyAlfa7:: iso enum for 2 is {droneCam.getEnumFromOption( 2, cam1_data, 'iso' )}") print(f" sonyAlfa7:: iso option for 800 is {droneCam.getOptionFromEnum( 800, cam1_data, 'iso' )}") print(f" sonyAlfa7:: focus mode enum for 2 is {droneCam.getEnumFromOption( 2, cam1_data, 'focus_mode' )}") print(f" sonyAlfa7:: focus mode option for 5 is {droneCam.getOptionFromEnum( 5, cam1_data, 'focus_mode' )}") print(f" sonyAlfa7:: focus_area enum for 2 is {droneCam.getEnumFromOption( 2, cam1_data, 'focus_area' )}") print(f" sonyAlfa7:: focus_area option for 6 is {droneCam.getOptionFromEnum( 6, cam1_data, 'focus_area' )}") print(f" sonyAlfa7:: shutter_speed enum for 2 is {droneCam.getEnumFromOption( 2, cam1_data, 'shutter_speed' )}") print(f" sonyAlfa7:: shutter_speed option for 66786 is {droneCam.getOptionFromEnum( 66786, cam1_data, 'shutter_speed' )}") print(f" sonyAlfa7:: still_cap_mode enum for 2 is {droneCam.getEnumFromOption( 2, cam1_data, 'still_cap_mode' )}") print(f" sonyAlfa7:: still_cap_mode option for 65541 is {droneCam.getOptionFromEnum( 65541, cam1_data, 'still_cap_mode' )}") print(f" sonyAlfa7:: still_cap_mode option for 65540 is {droneCam.getOptionFromEnum( 65540, cam1_data, 'still_cap_mode' )}") print(f" sonyAlfa7:: aperture enum for 2 is {droneCam.getEnumFromOption( 2, cam1_data, 'aperture' )}") print(f" sonyAlfa7:: aperture option for 1300 is {droneCam.getOptionFromEnum( 1300, cam1_data, 'aperture' )}")
import traceback
import discord, datetime, json, random
from discord.ext import commands
import aiohttp, io
from random import randint
from yarl import URL
import fast_colorthief

# Timestamp taken at import time; used by !help to report the bot's uptime.
start_time = datetime.datetime.utcnow()


async def _dominant_color(url) -> int:
    """Download an image and return its dominant color as a 24-bit RGB int.

    The value is suitable for ``discord.Embed(color=...)``. This replaces
    the identical fetch/colorthief snippet that was duplicated in every
    command of this cog.
    """
    async with aiohttp.ClientSession() as client:
        async with client.get(URL(str(url))) as img:
            image_bytes = io.BytesIO(await img.read())
    rgb = fast_colorthief.get_dominant_color(image_bytes, quality=1)
    return int(f'{rgb[0]:02x}{rgb[1]:02x}{rgb[2]:02x}', 16)


class General(commands.Cog):
    """Miscellaneous commands: help, emoji enlarging, avatars, user info,
    latency, and random image posts."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name="help")
    @commands.guild_only()
    async def help(self, ctx):
        """DM the command overview to the caller; fall back to the channel."""
        dm = ctx.author
        embed = discord.Embed(title="Help", color=discord.Color.green())
        embed.add_field(name="Parcility", value="`[[query: package]]`\n`!package <query: package>`\n`!repo <query: repo>`", inline=False)
        embed.add_field(name="Homebrew", value="`{{query: package/application}}`\n`!brew <query: package>`\n`!cask <query: application>`", inline=False)
        embed.add_field(name="IPSW Downloads", value="`!firmware <device: identifier/name>`", inline=False)
        embed.add_field(name="General", value="`!jumbo <emoji: mention>`\n`!userinfo [user: mention/id]`\n`!pfp [user: mention/id]`\n`!ping`\n`!cat`\n`!catgirl`\n`!catboy`", inline=False)
        embed.add_field(name="Moderation", value="`!purge <amount: integer>`\n`!kick <user: mention/id>`\n`!ban <user: mention/id>`\n`!unban <user: id>`", inline=False)
        embed.add_field(name="Source / Invite", value='https://github.com/xstecky/Table-Bot', inline=True)
        embed.add_field(name="Discord", value='https://diatr.us/discord', inline=False)
        now = datetime.datetime.utcnow()  # Timestamp of when uptime is computed
        delta = now - start_time
        hours, remainder = divmod(int(delta.total_seconds()), 3600)
        minutes, seconds = divmod(remainder, 60)
        days, hours = divmod(hours, 24)
        if days:
            time_format = "{d} days, {h} hours, {m} minutes, and {s} seconds"
        else:
            time_format = "{h} hours, {m} minutes, and {s} seconds"
        uptime_stamp = time_format.format(d=days, h=hours, m=minutes, s=seconds)
        embed.set_footer(text=f'Online for {uptime_stamp}')
        try:
            await dm.send(embed=embed)
            await ctx.send('📬')
        except Exception:
            # DMs closed (or any other send failure): post in the channel.
            # Kept broad on purpose (best-effort), but no longer a bare except.
            await ctx.send(embed=embed)

    @commands.command(name='jumbo', aliases=['e', 'enlarge', 'emoji'])
    @commands.guild_only()
    async def jumbo(self, ctx, emoji: discord.PartialEmoji = None):
        """Post an emoji at full size, colored by its dominant color."""
        if emoji is None:
            embed = discord.Embed(title="Error", color=discord.Color.red())
            embed.description = 'You must specify an emoji to enlarge!'
            await ctx.message.delete(delay=15)
            await ctx.send(embed=embed, delete_after=15)
        else:
            color = await _dominant_color(emoji.url)
            embed = discord.Embed(title=emoji.name, color=color)
            embed.set_image(url=emoji.url)
            await ctx.send(embed=embed)

    @commands.command(name='avatar', aliases=['pfp'])
    @commands.guild_only()
    async def avatar(self, ctx, user: discord.Member = None):
        """Show a user's avatar with links to each image format.

        BUG FIX: the original built the format links with f-strings that
        reused the delimiting quote inside replacement fields, which is a
        SyntaxError on Python < 3.12. The links are now assembled from a
        shared base URL.
        """
        user = user or ctx.author
        base = "https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}".format(user)
        # A tiny (16px) copy is enough to sample the dominant color cheaply.
        color = await _dominant_color(base + ".png?size=16")
        embed = discord.Embed(title=user.display_name, color=color)
        if user.is_avatar_animated():
            formats = ('gif', 'png', 'jpg', 'webp')
        else:
            formats = ('png', 'jpg', 'webp')
        links = ' '.join('[{f}]({b}.{f}?size=1024)'.format(f=fmt, b=base)
                         for fmt in formats)
        embed.add_field(name="View as", value=links, inline=False)
        embed.set_image(url=user.avatar_url)
        await ctx.send(embed=embed)

    @commands.command(name="info", aliases=['userinfo', 'ui'])
    @commands.guild_only()
    async def info(self, ctx, user: discord.Member = None):
        """Show creation/join dates and roles for a user."""
        user = user or ctx.author
        roles = ""
        if isinstance(user, discord.Member):
            for role in user.roles:
                if role != ctx.guild.default_role:
                    roles += role.mention + " "
        else:
            # (The original also assigned an unused local `joined` here;
            # it was dead code and has been removed.)
            roles = "No roles."
        color = await _dominant_color(
            "https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}.png".format(user))
        embed = discord.Embed(title="User Info",
                              description=f"{user.mention} ({user.id})",
                              color=color)
        embed.add_field(name="Created On", value=user.created_at.strftime("%B %d, %Y"), inline=True)
        embed.add_field(name="Joined On", value=user.joined_at.strftime("%B %d, %Y"), inline=True)
        embed.add_field(name="Roles", value=roles if roles else "None", inline=False)
        embed.set_thumbnail(url=user.avatar_url)
        await ctx.send(embed=embed)

    @commands.command(name="ping")
    @commands.guild_only()
    async def ping(self, ctx):
        """Report websocket latency; red embed when it is 50 ms or worse.

        BUG FIX: the original used two disjoint ifs (``lag >= 50`` and
        ``lag <= 49``), so a rounded latency such as 49.5 matched neither
        and crashed with an unbound ``embed``. A single if/else closes the
        gap.
        """
        lag = round(self.bot.latency * 1000, 1)
        if lag >= 50:
            embed = discord.Embed(title="Pong!", color=discord.Color.red())
        else:
            embed = discord.Embed(title="Pong!", color=discord.Color.green())
        embed.description = f'Latency is {lag} ms.'
        await ctx.send(embed=embed)

    @commands.command(name="catgirl")
    @commands.guild_only()
    async def catgirl(self, ctx):
        """Post a random catgirl image from nekos.life."""
        async with aiohttp.ClientSession() as client:
            async with client.get(URL('https://nekos.life/api/v2/img/neko', encoded=True)) as resp:
                if resp.status == 200:
                    response = json.loads(await resp.text())
                    image = response.get('url')
                    color = await _dominant_color(image)
                    embed = discord.Embed(color=color)
                    embed.set_image(url=image)
                    await ctx.send(embed=embed)

    @commands.command(name="catboy")
    @commands.guild_only()
    async def catboy(self, ctx):
        """Post a random catboy image from catboys.com."""
        async with aiohttp.ClientSession() as client:
            async with client.get(URL('https://api.catboys.com/img', encoded=True)) as resp:
                if resp.status == 200:
                    response = json.loads(await resp.text())
                    image = response.get('url')
                    color = await _dominant_color(image)
                    embed = discord.Embed(color=color)
                    embed.set_image(url=image)
                    await ctx.send(embed=embed)

    @commands.command(name="cat", aliases=['peepee'])
    @commands.guild_only()
    async def cat(self, ctx):
        """Post a random cat photo from the asset bucket (947 images)."""
        try:
            photonumber = randint(1, 947)
            url = f"https://assets.stkc.win/botpeepee/{photonumber}.jpg"
            color = await _dominant_color(url)
            embed = discord.Embed(color=color)
            embed.set_image(url=url)
            await ctx.send(embed=embed)
        except Exception as e:
            # Best-effort: surface the error to the channel rather than crash.
            await ctx.send(e)


def setup(bot):
    """discord.py extension entry point."""
    bot.add_cog(General(bot))
import traceback
import discord, datetime, json, random
from discord.ext import commands
import aiohttp, io
from random import randint
from yarl import URL
import fast_colorthief

# Timestamp taken at import time; used by !help to report the bot's uptime.
start_time = datetime.datetime.utcnow()


async def _dominant_color(url) -> int:
    """Download an image and return its dominant color as a 24-bit RGB int.

    The value is suitable for ``discord.Embed(color=...)``. This replaces
    the identical fetch/colorthief snippet that was duplicated in every
    command of this cog.
    """
    async with aiohttp.ClientSession() as client:
        async with client.get(URL(str(url))) as img:
            image_bytes = io.BytesIO(await img.read())
    rgb = fast_colorthief.get_dominant_color(image_bytes, quality=1)
    return int(f'{rgb[0]:02x}{rgb[1]:02x}{rgb[2]:02x}', 16)


class General(commands.Cog):
    """Miscellaneous commands: help, emoji enlarging, avatars, user info,
    latency, and random image posts."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name="help")
    @commands.guild_only()
    async def help(self, ctx):
        """DM the command overview to the caller; fall back to the channel."""
        dm = ctx.author
        embed = discord.Embed(title="Help", color=discord.Color.green())
        embed.add_field(name="Parcility", value="`[[query: package]]`\n`!package <query: package>`\n`!repo <query: repo>`", inline=False)
        embed.add_field(name="Homebrew", value="`{{query: package/application}}`\n`!brew <query: package>`\n`!cask <query: application>`", inline=False)
        embed.add_field(name="IPSW Downloads", value="`!firmware <device: identifier/name>`", inline=False)
        embed.add_field(name="General", value="`!jumbo <emoji: mention>`\n`!userinfo [user: mention/id]`\n`!pfp [user: mention/id]`\n`!ping`\n`!cat`\n`!catgirl`\n`!catboy`", inline=False)
        embed.add_field(name="Moderation", value="`!purge <amount: integer>`\n`!kick <user: mention/id>`\n`!ban <user: mention/id>`\n`!unban <user: id>`", inline=False)
        embed.add_field(name="Source / Invite", value='https://github.com/xstecky/Table-Bot', inline=True)
        embed.add_field(name="Discord", value='https://diatr.us/discord', inline=False)
        now = datetime.datetime.utcnow()  # Timestamp of when uptime is computed
        delta = now - start_time
        hours, remainder = divmod(int(delta.total_seconds()), 3600)
        minutes, seconds = divmod(remainder, 60)
        days, hours = divmod(hours, 24)
        if days:
            time_format = "{d} days, {h} hours, {m} minutes, and {s} seconds"
        else:
            time_format = "{h} hours, {m} minutes, and {s} seconds"
        uptime_stamp = time_format.format(d=days, h=hours, m=minutes, s=seconds)
        embed.set_footer(text=f'Online for {uptime_stamp}')
        try:
            await dm.send(embed=embed)
            await ctx.send('📬')
        except Exception:
            # DMs closed (or any other send failure): post in the channel.
            # Kept broad on purpose (best-effort), but no longer a bare except.
            await ctx.send(embed=embed)

    @commands.command(name='jumbo', aliases=['e', 'enlarge', 'emoji'])
    @commands.guild_only()
    async def jumbo(self, ctx, emoji: discord.PartialEmoji = None):
        """Post an emoji at full size, colored by its dominant color."""
        if emoji is None:
            embed = discord.Embed(title="Error", color=discord.Color.red())
            embed.description = 'You must specify an emoji to enlarge!'
            await ctx.message.delete(delay=15)
            await ctx.send(embed=embed, delete_after=15)
        else:
            color = await _dominant_color(emoji.url)
            embed = discord.Embed(title=emoji.name, color=color)
            embed.set_image(url=emoji.url)
            await ctx.send(embed=embed)

    @commands.command(name='avatar', aliases=['pfp'])
    @commands.guild_only()
    async def avatar(self, ctx, user: discord.Member = None):
        """Show a user's avatar with links to each image format."""
        user = user or ctx.author
        base = "https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}".format(user)
        # A tiny (16px) copy is enough to sample the dominant color cheaply.
        color = await _dominant_color(base + ".png?size=16")
        embed = discord.Embed(title=user.display_name, color=color)
        if user.is_avatar_animated():
            formats = ('gif', 'png', 'jpg', 'webp')
        else:
            formats = ('png', 'jpg', 'webp')
        links = ' '.join('[{f}]({b}.{f}?size=1024)'.format(f=fmt, b=base)
                         for fmt in formats)
        embed.add_field(name="View as", value=links, inline=False)
        embed.set_image(url=user.avatar_url)
        await ctx.send(embed=embed)

    @commands.command(name="info", aliases=['userinfo', 'ui'])
    @commands.guild_only()
    async def info(self, ctx, user: discord.Member = None):
        """Show creation/join dates and roles for a user."""
        user = user or ctx.author
        roles = ""
        if isinstance(user, discord.Member):
            for role in user.roles:
                if role != ctx.guild.default_role:
                    roles += role.mention + " "
        else:
            # (The original also assigned an unused local `joined` here;
            # it was dead code and has been removed.)
            roles = "No roles."
        color = await _dominant_color(
            "https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}.png".format(user))
        embed = discord.Embed(title="User Info",
                              description=f"{user.mention} ({user.id})",
                              color=color)
        embed.add_field(name="Created On", value=user.created_at.strftime("%B %d, %Y"), inline=True)
        embed.add_field(name="Joined On", value=user.joined_at.strftime("%B %d, %Y"), inline=True)
        embed.add_field(name="Roles", value=roles if roles else "None", inline=False)
        embed.set_thumbnail(url=user.avatar_url)
        await ctx.send(embed=embed)

    @commands.command(name="ping")
    @commands.guild_only()
    async def ping(self, ctx):
        """Report websocket latency; red embed when it is 50 ms or worse.

        BUG FIX: the original used two disjoint ifs (``lag >= 50`` and
        ``lag <= 49``), so a rounded latency such as 49.5 matched neither
        and crashed with an unbound ``embed``. A single if/else closes the
        gap.
        """
        lag = round(self.bot.latency * 1000, 1)
        if lag >= 50:
            embed = discord.Embed(title="Pong!", color=discord.Color.red())
        else:
            embed = discord.Embed(title="Pong!", color=discord.Color.green())
        embed.description = f'Latency is {lag} ms.'
        await ctx.send(embed=embed)

    @commands.command(name="catgirl")
    @commands.guild_only()
    async def catgirl(self, ctx):
        """Post a random catgirl image from nekos.life."""
        async with aiohttp.ClientSession() as client:
            async with client.get(URL('https://nekos.life/api/v2/img/neko', encoded=True)) as resp:
                if resp.status == 200:
                    response = json.loads(await resp.text())
                    image = response.get('url')
                    color = await _dominant_color(image)
                    embed = discord.Embed(color=color)
                    embed.set_image(url=image)
                    await ctx.send(embed=embed)

    @commands.command(name="catboy")
    @commands.guild_only()
    async def catboy(self, ctx):
        """Post a random catboy image from catboys.com."""
        async with aiohttp.ClientSession() as client:
            async with client.get(URL('https://api.catboys.com/img', encoded=True)) as resp:
                if resp.status == 200:
                    response = json.loads(await resp.text())
                    image = response.get('url')
                    color = await _dominant_color(image)
                    embed = discord.Embed(color=color)
                    embed.set_image(url=image)
                    await ctx.send(embed=embed)

    @commands.command(name="cat", aliases=['peepee'])
    @commands.guild_only()
    async def cat(self, ctx):
        """Post a random cat photo from the asset bucket (947 images)."""
        try:
            photonumber = randint(1, 947)
            url = f"https://assets.stkc.win/botpeepee/{photonumber}.jpg"
            color = await _dominant_color(url)
            embed = discord.Embed(color=color)
            embed.set_image(url=url)
            await ctx.send(embed=embed)
        except Exception as e:
            # Best-effort: surface the error to the channel rather than crash.
            await ctx.send(e)


def setup(bot):
    """discord.py extension entry point."""
    bot.add_cog(General(bot))
from typing import TYPE_CHECKING, Dict, List

from ray.tune.checkpoint_manager import Checkpoint

if TYPE_CHECKING:
    from ray.tune.trial import Trial


class Callback:
    """Tune base callback that can be extended and passed to a ``TrialRunner``

    Tune callbacks are called from within the ``TrialRunner`` class. There are
    several hooks that can be used, all of which are found in the submethod
    definitions of this base class.

    The parameters passed to the ``**info`` dict vary between hooks. The
    parameters passed are described in the docstrings of the methods.

    This example will print a metric each time a result is received:

    .. code-block:: python

        from ray import tune
        from ray.tune import Callback


        class MyCallback(Callback):
            def on_trial_result(self, iteration, trials, trial, result,
                                **info):
                print(f"Got result: {result['metric']}")


        def train(config):
            for i in range(10):
                tune.report(metric=i)


        tune.run(
            train,
            callbacks=[MyCallback()])

    """

    def setup(self):
        """Called once at the very beginning of training.

        Any Callback setup should be added here (setting environment
        variables, etc.)
        """
        pass

    def on_step_begin(self, iteration: int, trials: List["Trial"], **info):
        """Called at the start of each tuning loop step.

        Arguments:
            iteration (int): Number of iterations of the tuning loop.
            trials (List[Trial]): List of trials.
            **info: Kwargs dict for forward compatibility.
        """
        pass

    def on_step_end(self, iteration: int, trials: List["Trial"], **info):
        """Called at the end of each tuning loop step.

        The iteration counter is increased before this hook is called.

        Arguments:
            iteration (int): Number of iterations of the tuning loop.
            trials (List[Trial]): List of trials.
            **info: Kwargs dict for forward compatibility.
        """
        pass

    def on_trial_start(self, iteration: int, trials: List["Trial"],
                       trial: "Trial", **info):
        """Called after starting a trial instance.

        Arguments:
            iteration (int): Number of iterations of the tuning loop.
            trials (List[Trial]): List of trials.
            trial (Trial): Trial that just has been started.
            **info: Kwargs dict for forward compatibility.
        """
        pass

    def on_trial_restore(self, iteration: int, trials: List["Trial"],
                         trial: "Trial", **info):
        """Called after restoring a trial instance.

        Arguments:
            iteration (int): Number of iterations of the tuning loop.
            trials (List[Trial]): List of trials.
            trial (Trial): Trial that just has been restored.
            **info: Kwargs dict for forward compatibility.
        """
        pass

    def on_trial_save(self, iteration: int, trials: List["Trial"],
                      trial: "Trial", **info):
        """Called after receiving a checkpoint from a trial.

        Arguments:
            iteration (int): Number of iterations of the tuning loop.
            trials (List[Trial]): List of trials.
            trial (Trial): Trial that just saved a checkpoint.
            **info: Kwargs dict for forward compatibility.
        """
        pass

    def on_trial_result(self, iteration: int, trials: List["Trial"],
                        trial: "Trial", result: Dict, **info):
        """Called after receiving a result from a trial.

        The search algorithm and scheduler are notified before this
        hook is called.

        Arguments:
            iteration (int): Number of iterations of the tuning loop.
            trials (List[Trial]): List of trials.
            trial (Trial): Trial that just sent a result.
            result (Dict): Result that the trial sent.
            **info: Kwargs dict for forward compatibility.
        """
        pass

    def on_trial_complete(self, iteration: int, trials: List["Trial"],
                          trial: "Trial", **info):
        """Called after a trial instance completed.

        The search algorithm and scheduler are notified before this
        hook is called.

        Arguments:
            iteration (int): Number of iterations of the tuning loop.
            trials (List[Trial]): List of trials.
            trial (Trial): Trial that just has been completed.
            **info: Kwargs dict for forward compatibility.
        """
        pass

    def on_trial_error(self, iteration: int, trials: List["Trial"],
                       trial: "Trial", **info):
        """Called after a trial instance failed (errored).

        The search algorithm and scheduler are notified before this
        hook is called.

        Arguments:
            iteration (int): Number of iterations of the tuning loop.
            trials (List[Trial]): List of trials.
            trial (Trial): Trial that just has errored.
            **info: Kwargs dict for forward compatibility.
        """
        pass

    def on_checkpoint(self, iteration: int, trials: List["Trial"],
                      trial: "Trial", checkpoint: Checkpoint, **info):
        """Called after a trial saved a checkpoint with Tune.

        Arguments:
            iteration (int): Number of iterations of the tuning loop.
            trials (List[Trial]): List of trials.
            trial (Trial): Trial that just saved the checkpoint.
            checkpoint (Checkpoint): Checkpoint object that has been
                saved by the trial.
            **info: Kwargs dict for forward compatibility.
        """
        pass


class CallbackList:
    """Call multiple callbacks at once.

    Fans every hook out to each callback in ``callbacks``, in order.
    """

    def __init__(self, callbacks: List[Callback]):
        self._callbacks = callbacks

    def setup(self):
        for callback in self._callbacks:
            callback.setup()

    def on_step_begin(self, **info):
        for callback in self._callbacks:
            callback.on_step_begin(**info)

    def on_step_end(self, **info):
        for callback in self._callbacks:
            callback.on_step_end(**info)

    def on_trial_start(self, **info):
        for callback in self._callbacks:
            callback.on_trial_start(**info)

    def on_trial_restore(self, **info):
        for callback in self._callbacks:
            callback.on_trial_restore(**info)

    def on_trial_save(self, **info):
        for callback in self._callbacks:
            callback.on_trial_save(**info)

    def on_trial_result(self, **info):
        for callback in self._callbacks:
            callback.on_trial_result(**info)

    def on_trial_complete(self, **info):
        for callback in self._callbacks:
            callback.on_trial_complete(**info)

    def on_trial_error(self, **info):
        for callback in self._callbacks:
            callback.on_trial_error(**info)

    def on_checkpoint(self, **info):
        for callback in self._callbacks:
            callback.on_checkpoint(**info)
from typing import TYPE_CHECKING, Dict, List

from ray.tune.checkpoint_manager import Checkpoint

if TYPE_CHECKING:
    from ray.tune.trial import Trial


class Callback:
    """Tune base callback that can be extended and passed to a ``TrialRunner``

    Tune callbacks are called from within the ``TrialRunner`` class. There are
    several hooks that can be used, all of which are found in the submethod
    definitions of this base class.

    The parameters passed to the ``**info`` dict vary between hooks. The
    parameters passed are described in the docstrings of the methods.

    This example will print a metric each time a result is received:

    .. code-block:: python

        from ray import tune
        from ray.tune import Callback


        class MyCallback(Callback):
            def on_trial_result(self, iteration, trials, trial, result,
                                **info):
                print(f"Got result: {result['metric']}")


        def train(config):
            for i in range(10):
                tune.report(metric=i)


        tune.run(
            train,
            callbacks=[MyCallback()])

    """

    def setup(self):
        """Called once at the very beginning of training.

        Any Callback setup should be added here (setting environment
        variables, etc.)
        """
        pass

    def on_step_begin(self, iteration: int, trials: List["Trial"], **info):
        """Called at the start of each tuning loop step.

        Arguments:
            iteration (int): Number of iterations of the tuning loop.
            trials (List[Trial]): List of trials.
            **info: Kwargs dict for forward compatibility.
        """
        pass

    def on_step_end(self, iteration: int, trials: List["Trial"], **info):
        """Called at the end of each tuning loop step.

        The iteration counter is increased before this hook is called.

        Arguments:
            iteration (int): Number of iterations of the tuning loop.
            trials (List[Trial]): List of trials.
            **info: Kwargs dict for forward compatibility.
        """
        pass

    def on_trial_start(self, iteration: int, trials: List["Trial"],
                       trial: "Trial", **info):
        """Called after starting a trial instance.

        Arguments:
            iteration (int): Number of iterations of the tuning loop.
            trials (List[Trial]): List of trials.
            trial (Trial): Trial that just has been started.
            **info: Kwargs dict for forward compatibility.
        """
        pass

    def on_trial_restore(self, iteration: int, trials: List["Trial"],
                         trial: "Trial", **info):
        """Called after restoring a trial instance.

        Arguments:
            iteration (int): Number of iterations of the tuning loop.
            trials (List[Trial]): List of trials.
            trial (Trial): Trial that just has been restored.
            **info: Kwargs dict for forward compatibility.
        """
        pass

    def on_trial_save(self, iteration: int, trials: List["Trial"],
                      trial: "Trial", **info):
        """Called after receiving a checkpoint from a trial.

        Arguments:
            iteration (int): Number of iterations of the tuning loop.
            trials (List[Trial]): List of trials.
            trial (Trial): Trial that just saved a checkpoint.
            **info: Kwargs dict for forward compatibility.
        """
        pass

    def on_trial_result(self, iteration: int, trials: List["Trial"],
                        trial: "Trial", result: Dict, **info):
        """Called after receiving a result from a trial.

        The search algorithm and scheduler are notified before this
        hook is called.

        Arguments:
            iteration (int): Number of iterations of the tuning loop.
            trials (List[Trial]): List of trials.
            trial (Trial): Trial that just sent a result.
            result (Dict): Result that the trial sent.
            **info: Kwargs dict for forward compatibility.
        """
        pass

    def on_trial_complete(self, iteration: int, trials: List["Trial"],
                          trial: "Trial", **info):
        """Called after a trial instance completed.

        The search algorithm and scheduler are notified before this
        hook is called.

        Arguments:
            iteration (int): Number of iterations of the tuning loop.
            trials (List[Trial]): List of trials.
            trial (Trial): Trial that just has been completed.
            **info: Kwargs dict for forward compatibility.
        """
        pass

    def on_trial_error(self, iteration: int, trials: List["Trial"],
                       trial: "Trial", **info):
        """Called after a trial instance failed (errored).

        The search algorithm and scheduler are notified before this
        hook is called.

        Arguments:
            iteration (int): Number of iterations of the tuning loop.
            trials (List[Trial]): List of trials.
            trial (Trial): Trial that just has errored.
            **info: Kwargs dict for forward compatibility.
        """
        pass

    def on_checkpoint(self, iteration: int, trials: List["Trial"],
                      trial: "Trial", checkpoint: Checkpoint, **info):
        """Called after a trial saved a checkpoint with Tune.

        Arguments:
            iteration (int): Number of iterations of the tuning loop.
            trials (List[Trial]): List of trials.
            trial (Trial): Trial that just saved the checkpoint.
            checkpoint (Checkpoint): Checkpoint object that has been
                saved by the trial.
            **info: Kwargs dict for forward compatibility.
        """
        pass


class CallbackList:
    """Call multiple callbacks at once.

    Fans every hook out to each callback in ``callbacks``, in order.
    """

    def __init__(self, callbacks: List[Callback]):
        self._callbacks = callbacks

    def setup(self):
        for callback in self._callbacks:
            callback.setup()

    def on_step_begin(self, **info):
        for callback in self._callbacks:
            callback.on_step_begin(**info)

    def on_step_end(self, **info):
        for callback in self._callbacks:
            callback.on_step_end(**info)

    def on_trial_start(self, **info):
        for callback in self._callbacks:
            callback.on_trial_start(**info)

    def on_trial_restore(self, **info):
        for callback in self._callbacks:
            callback.on_trial_restore(**info)

    def on_trial_save(self, **info):
        for callback in self._callbacks:
            callback.on_trial_save(**info)

    def on_trial_result(self, **info):
        for callback in self._callbacks:
            callback.on_trial_result(**info)

    def on_trial_complete(self, **info):
        for callback in self._callbacks:
            callback.on_trial_complete(**info)

    def on_trial_error(self, **info):
        for callback in self._callbacks:
            callback.on_trial_error(**info)

    def on_checkpoint(self, **info):
        for callback in self._callbacks:
            callback.on_checkpoint(**info)
import pandas as pd
import os
import numpy as np

# Directory that holds the per-page JSON dumps of the lesson tables.
paths = {
    'Data': os.path.join(os.getcwd(), 'Data')
}


def json_reader(file):
    """Read one lesson JSON file from the Data directory into a DataFrame.

    NOTE(review): ``set_index`` below is NOT in-place and its result is
    discarded, so 'Lesson-Code' stays a regular column. The rest of this
    module relies on that column, so the call is kept as-is (effectively
    a no-op) rather than "fixed".
    """
    df = pd.read_json(os.path.join(paths['Data'], file), encoding='utf-8',
                      dtype=int)
    df.set_index('Lesson-Code')
    return df


def DataFrame_appender(df: pd.DataFrame, ld: pd.DataFrame):
    """Append ``df`` below ``ld`` and return the combined frame.

    Uses ``pd.concat`` because ``DataFrame.append`` was deprecated in
    pandas 1.4 and removed in pandas 2.0; the result is the same
    (ignore_index, no integrity check).
    """
    lessons_DataFrame: pd.DataFrame = pd.concat([ld, df], ignore_index=True)
    return lessons_DataFrame


def DataFrame_Build():
    """Load every JSON file in Data/ into one cleaned, de-duplicated frame."""
    files = os.listdir(paths['Data'])
    # json_reader already prefixes paths['Data'], so pass bare file names.
    # (The original passed pre-joined absolute paths, which only worked
    # because os.path.join ignores its first argument when the second one
    # is absolute.)
    lessons_DataFrame: pd.DataFrame = json_reader(files[0])
    for file in files[1:]:
        lessons_DataFrame = DataFrame_appender(json_reader(file),
                                               lessons_DataFrame)
    lessons_DataFrame = lessons_DataFrame.convert_dtypes()
    lessons_DataFrame.dropna(inplace=True)
    lessons_DataFrame.drop_duplicates(inplace=True, ignore_index=True,
                                      subset=['Lesson-Code'])
    return lessons_DataFrame


def comparing_DataFrames(df1: pd.DataFrame, df2: pd.DataFrame):
    """Diff two snapshots of the lesson table.

    Adds 'Registered_diff'/'Capacity_diff' columns to ``df2`` (mutated in
    place). When the two frames differ in their set of lessons, the
    added/removed rows are stripped first so the element-wise comparison
    can align; in that case ``[changed_lessons_dict, new_removed_lessons,
    df2]`` is returned, otherwise an empty list.
    """
    try:
        df2['Registered_diff'] = np.where(
            df1['Registered'] == df2['Registered'], 0,
            df2['Registered'] - df1['Registered'])
        df2['Capacity_diff'] = np.where(
            df1['Capacity'] == df2['Capacity'], 0,
            df2['Capacity'] - df1['Capacity'])
    except Exception:
        # Shapes/indices differ: lessons were added or removed between the
        # snapshots. Rows unique to either frame are collected here.
        new_removed_lessons = pd.concat([df1, df2]).drop_duplicates(
            keep=False, subset=['Lesson-Code'])
        changed_lessons_list = list(new_removed_lessons['Lesson-Code'])
        changed_lessons_dict = dict.fromkeys(changed_lessons_list)
        # NOTE(review): the inner for-loops remove items from the list they
        # iterate, which skips elements; the enclosing while-loop retries
        # until the list is drained, so the net result is still complete.
        while len(changed_lessons_list) > 0:
            try:
                for x in changed_lessons_list:
                    if x in list(df2['Lesson-Code']):
                        df2 = df2[df2['Lesson-Code'] != x]
                        changed_lessons_list.remove(x)
                        changed_lessons_dict[x] = 'Added'
                for x in changed_lessons_list:
                    if x in list(df1['Lesson-Code']):
                        df1 = df1[df1['Lesson-Code'] != x]
                        changed_lessons_list.remove(x)
                        changed_lessons_dict[x] = 'Removed'
            except Exception as e:
                print(e)
        # Recompute the diffs on the now-aligned (filtered) frames.
        df2['Registered_diff'] = np.where(
            df1['Registered'] == df2['Registered'], 0,
            df2['Registered'] - df1['Registered'])
        df2['Capacity_diff'] = np.where(
            df1['Capacity'] == df2['Capacity'], 0,
            df2['Capacity'] - df1['Capacity'])
    if 'new_removed_lessons' in locals():
        return [changed_lessons_dict, new_removed_lessons, df2]
    else:
        return []


def reporter(df2: pd.DataFrame, df1: pd.DataFrame):
    """Build human-readable (Persian) change messages between snapshots.

    ``df1`` is the older snapshot, ``df2`` the newer one. Fixed: the
    original interpolated same-quote string literals inside f-strings
    (``f"...{"..."}..."``), a SyntaxError on Python < 3.12; the literals
    are now inlined, producing byte-identical output.
    """
    report = comparing_DataFrames(df1=df1, df2=df2)
    if len(report) == 3:
        # Lessons were added/removed; use the aligned frame from the diff.
        df2 = report[2]
    report_list = list()
    for code, lesson, registered, capacity, updates, teacher, C_updates in zip(
            df2['Lesson-Code'], df2['Lesson-Name'], df2['Registered'],
            df2['Capacity'], df2['Registered_diff'], df2['Teacher'],
            df2['Capacity_diff']):
        if updates != 0:
            if updates > 0:
                report_list.append(
                    f"{lesson} {teacher}،{abs(updates)} نفر ثبت نام شد|شدن."
                    f"\nظرفیت:{registered}/{capacity}\nکد: #{code}")
            else:
                report_list.append(
                    f"{lesson} {teacher}،{abs(updates)} نفر حذف کرد|کردن."
                    f"\nظرفیت:{registered}/{capacity}\nکد: #{code}")
        if C_updates != 0:
            if C_updates > 0:
                report_list.append(
                    f"{lesson} {teacher}،{abs(C_updates)} نفر به ظرفیت اضافه شد."
                    f"\nظرفیت:{registered}/{capacity}\nکد: #{code}")
            else:
                report_list.append(
                    f"{lesson} {teacher}،{abs(C_updates)} نفر از ظرفیت کم شد."
                    f"\nظرفیت:{registered}/{capacity}\nکد: #{code}")
    if len(report) == 3:
        ind = 0
        report[1] = report[1].reset_index()
        # Walk the added/removed rows in parallel with their verdicts.
        for les in report[0]:
            for ind in report[1].reset_index().index[ind:]:
                if report[0][les] == 'Added':
                    report_list.append(
                        f"{report[1]['Lesson-Name'][ind]} "
                        f"{report[1]['Teacher'][ind]},اضافه شد."
                        f"\nکد: #{report[1]['Lesson-Code'][ind]}")
                elif report[0][les] == 'Removed':
                    report_list.append(
                        f"{report[1]['Lesson-Name'][ind]} "
                        f"{report[1]['Teacher'][ind]},حذف شد."
                        f"\nکد: #{report[1]['Lesson-Code'][ind]}")
                ind += 1
                break
    return report_list


def Capacity_Report(df: pd.DataFrame, Lesson_Code: int):
    """Format a one-lesson capacity summary string.

    The matching row is looked up once instead of repeating the same
    boolean filter for every field (the original filtered ``df`` six
    times). Output is byte-identical to the original format.
    """
    row = df[df['Lesson-Code'] == Lesson_Code]
    return (f"{row['Lesson-Name'].values[0]},{row['Teacher'].values[0]}:"
            f"\nظرفیت:{row['Registered'].values[0]}/{row['Capacity'].values[0]}"
            f"\nصف:{row['Queue'].values[0]}"
            f"\nکد: #{row['Lesson-Code'].values[0]}")
import pandas as pd
import os
import numpy as np

# Directory containing the per-page JSON dumps of the lesson tables.
paths = {
    'Data': os.path.join(os.getcwd(), 'Data')
}


def json_reader(file):
    """Read one lesson JSON file from the Data directory into a DataFrame.

    :param file: file name relative to ``paths['Data']``.
    :return: DataFrame of the file's records.
    """
    # NOTE(review): dtype=int is applied to every column although several hold
    # Persian text; pandas ignores it where coercion fails -- confirm intent.
    df = pd.read_json(os.path.join(paths['Data'], file), encoding='utf-8', dtype=int)
    # BUG FIX: the original called df.set_index('Lesson-Code') and discarded the
    # result (set_index is not in-place), so it was a dead statement.  The rest
    # of the pipeline relies on 'Lesson-Code' remaining a column, so the dead
    # call is simply removed rather than made effective.
    return df


def DataFrame_appender(df: pd.DataFrame, ld: pd.DataFrame):
    """Append ``df``'s rows after ``ld``'s, renumbering the index.

    BUG FIX: uses pd.concat -- DataFrame.append was deprecated in pandas 1.4
    and removed in pandas 2.0.
    """
    return pd.concat([ld, df], ignore_index=True)


def DataFrame_Build():
    """Build one deduplicated DataFrame from every JSON file in the Data dir.

    :return: DataFrame with NA rows dropped and one row per 'Lesson-Code'.
    """
    files = os.listdir(paths['Data'])
    # BUG FIX: the original joined paths['Data'] onto the file name here AND
    # inside json_reader (it only worked because joining onto an absolute path
    # discards the first component).  Pass the bare file name once.
    lessons_DataFrame: pd.DataFrame = json_reader(files[0])
    for file in files[1:]:
        lessons_DataFrame = DataFrame_appender(json_reader(file), lessons_DataFrame)
    lessons_DataFrame = lessons_DataFrame.convert_dtypes()
    lessons_DataFrame.dropna(inplace=True)
    lessons_DataFrame.drop_duplicates(inplace=True, ignore_index=True, subset=['Lesson-Code'])
    return lessons_DataFrame


def _aligned_diff(old: pd.DataFrame, new: pd.DataFrame, column: str) -> np.ndarray:
    """Element-wise ``new - old`` for *column*, rows matched by 'Lesson-Code'.

    Both frames must contain the same set of lesson codes.  Matching by code
    (instead of by positional index, as the original did) keeps the diff
    correct after rows have been filtered out.
    """
    old_by_code = old.set_index('Lesson-Code')[column]
    aligned_old = old_by_code.reindex(new['Lesson-Code']).to_numpy()
    return new[column].to_numpy() - aligned_old


def comparing_DataFrames(df1: pd.DataFrame, df2: pd.DataFrame):
    """Compare an old snapshot ``df1`` against a new snapshot ``df2``.

    Adds 'Registered_diff' / 'Capacity_diff' columns to ``df2``.  Returns
    ``[]`` when both snapshots hold the same lesson codes, otherwise
    ``[changes_dict, new_removed_df, df2]`` where ``changes_dict`` maps a
    lesson code to 'Added' or 'Removed', ``new_removed_df`` holds those rows,
    and ``df2`` is the new snapshot restricted to the common codes.

    BUG FIX: the original detected added/removed lessons only when the two
    frames had different lengths (it relied on np.where raising); an
    equal-length add+remove pair was silently reported as plain diffs.  The
    comparison is now an explicit symmetric difference on 'Lesson-Code', and
    the list-mutated-while-iterating ``while``/``try`` retry loop is gone.
    """
    # Rows whose code appears in exactly one of the two snapshots.
    new_removed_lessons = pd.concat([df1, df2]).drop_duplicates(keep=False, subset=['Lesson-Code'])
    if new_removed_lessons.empty:
        # Same lesson codes on both sides: mutate df2 in place (callers rely
        # on this) and signal "no structural change" with an empty list.
        df2['Registered_diff'] = _aligned_diff(df1, df2, 'Registered')
        df2['Capacity_diff'] = _aligned_diff(df1, df2, 'Capacity')
        return []

    df1_codes = set(df1['Lesson-Code'])
    df2_codes = set(df2['Lesson-Code'])
    changed_lessons_dict = {}
    for code in new_removed_lessons['Lesson-Code']:
        if code in df2_codes:
            changed_lessons_dict[code] = 'Added'      # only in the new snapshot
        elif code in df1_codes:
            changed_lessons_dict[code] = 'Removed'    # only in the old snapshot

    added = [c for c, s in changed_lessons_dict.items() if s == 'Added']
    removed = [c for c, s in changed_lessons_dict.items() if s == 'Removed']
    # Restrict both frames to the common codes before computing diffs.
    df2 = df2[~df2['Lesson-Code'].isin(added)].copy()
    df1 = df1[~df1['Lesson-Code'].isin(removed)].copy()
    df2['Registered_diff'] = _aligned_diff(df1, df2, 'Registered')
    df2['Capacity_diff'] = _aligned_diff(df1, df2, 'Capacity')
    return [changed_lessons_dict, new_removed_lessons, df2]


def reporter(df2: pd.DataFrame, df1: pd.DataFrame):
    """Build human-readable (Persian) change messages between two snapshots.

    :param df2: the newer snapshot.
    :param df1: the older snapshot.
    :return: list of message strings (possibly empty).
    """
    report = comparing_DataFrames(df1=df1, df2=df2)
    if len(report) == 3:
        # Structural changes occurred: use the code-filtered frame.
        df2 = report[2]
    report_list = []
    columns = ['Lesson-Code', 'Lesson-Name', 'Registered', 'Capacity',
               'Registered_diff', 'Teacher', 'Capacity_diff']
    for code, lesson, registered, capacity, updates, teacher, c_updates in zip(
            *(df2[col] for col in columns)):
        if updates > 0:
            report_list.append(
                f"{lesson} {teacher}،{abs(updates)} نفر ثبت نام شد|شدن.\nظرفیت:{registered}/{capacity}\nکد: #{code}")
        elif updates < 0:
            report_list.append(
                f"{lesson} {teacher}،{abs(updates)} نفر حذف کرد|کردن.\nظرفیت:{registered}/{capacity}\nکد: #{code}")
        if c_updates > 0:
            report_list.append(
                f"{lesson} {teacher}،{abs(c_updates)} نفر به ظرفیت اضافه شد.\nظرفیت:{registered}/{capacity}\nکد: #{code}")
        elif c_updates < 0:
            report_list.append(
                f"{lesson} {teacher}،{abs(c_updates)} نفر از ظرفیت کم شد.\nظرفیت:{registered}/{capacity}\nکد: #{code}")
    if len(report) == 3:
        # Announce added / removed lessons.  The original walked the dict and
        # the frame with a fragile shared counter; rows and statuses are now
        # paired explicitly by lesson code.
        changes = report[0]
        new_removed = report[1].reset_index()
        for ind, code in enumerate(new_removed['Lesson-Code']):
            status = changes.get(code)
            name = new_removed['Lesson-Name'][ind]
            teacher = new_removed['Teacher'][ind]
            if status == 'Added':
                report_list.append(f"{name} {teacher},اضافه شد.\nکد: #{code}")
            elif status == 'Removed':
                report_list.append(f"{name} {teacher},حذف شد.\nکد: #{code}")
    return report_list


def Capacity_Report(df: pd.DataFrame, Lesson_Code: int):
    """Return the Persian capacity summary string for one lesson code."""
    # Single row lookup instead of the original's seven separate boolean-mask
    # scans of the whole frame.
    row = df[df['Lesson-Code'] == Lesson_Code].iloc[0]
    return (f"{row['Lesson-Name']},{row['Teacher']}:"
            f"\nظرفیت:{row['Registered']}/{row['Capacity']}"
            f"\nصف:{row['Queue']}"
            f"\nکد: #{row['Lesson-Code']}")

# NOTE: a large commented-out manual smoke-test harness (kept as a module-level
# string literal in the original) was removed; recover it from VCS if needed.
import demistomock as demisto from CommonServerPython import * from CommonServerUserPython import * ''' IMPORTS ''' import json import requests import base64 import email import hashlib from typing import List from dateutil.parser import parse from typing import Dict, Tuple, Any, Optional, Union # Disable insecure warnings requests.packages.urllib3.disable_warnings() ''' GLOBALS/PARAMS ''' CLIENT_ID = demisto.params().get('client_id') SECRET = demisto.params().get('secret') # Remove trailing slash to prevent wrong URL path to service SERVER = demisto.params()['url'][:-1] if (demisto.params()['url'] and demisto.params()['url'].endswith('/')) else \ demisto.params()['url'] # Should we use SSL USE_SSL = not demisto.params().get('insecure', False) # How many time before the first fetch to retrieve incidents FETCH_TIME = demisto.params().get('fetch_time', '3 days') BYTE_CREDS = '{name}:{password}'.format(name=CLIENT_ID, password=SECRET).encode('utf-8') # Headers to be sent in requests HEADERS = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization': 'Basic {}'.format(base64.b64encode(BYTE_CREDS).decode()) } # Note: True life time of token is actually 30 mins TOKEN_LIFE_TIME = 28 INCIDENTS_PER_FETCH = int(demisto.params().get('incidents_per_fetch', 15)) # Remove proxy if not set to true in params handle_proxy() ''' KEY DICTIONARY ''' DETECTIONS_BASE_KEY_MAP = { 'device.hostname': 'System', 'device.cid': 'CustomerID', 'hostinfo.domain': 'MachineDomain', 'detection_id': 'ID', 'created_timestamp': 'ProcessStartTime', 'max_severity': 'MaxSeverity', 'show_in_ui': 'ShowInUi', 'status': 'Status' } DETECTIONS_BEHAVIORS_KEY_MAP = { 'filename': 'FileName', 'scenario': 'Scenario', 'md5': 'MD5', 'sha256': 'SHA256', 'ioc_type': 'IOCType', 'ioc_value': 'IOCValue', 'cmdline': 'CommandLine', 'user_name': 'UserName', 'behavior_id': 'ID', } SEARCH_IOC_KEY_MAP = { 'type': 'Type', 'value': 'Value', 'policy': 'Policy', 'source': 'Source', 'share_level': 
'ShareLevel', 'expiration_timestamp': 'Expiration', 'description': 'Description', 'created_timestamp': 'CreatedTime', 'created_by': 'CreatedBy', 'modified_timestamp': 'ModifiedTime', 'modified_by': 'ModifiedBy' } SEARCH_DEVICE_KEY_MAP = { 'device_id': 'ID', 'external_ip': 'ExternalIP', 'local_ip': 'LocalIP', 'hostname': 'Hostname', 'os_version': 'OS', 'mac_address': 'MacAddress', 'first_seen': 'FirstSeen', 'last_seen': 'LastSeen' } ''' SPLIT KEY DICTIONARY ''' """ Pattern: { 'Path': 'Path to item', 'NewKey': 'Value of output key', 'Delim': 'Delimiter char', 'Index': Split Array Index } """ DETECTIONS_BEHAVIORS_SPLIT_KEY_MAP = [ { 'Path': 'parent_details.parent_process_graph_id', 'NewKey': 'SensorID', 'Delim': ':', 'Index': 1 }, { 'Path': 'parent_details.parent_process_graph_id', 'NewKey': 'ParentProcessID', 'Delim': ':', 'Index': 2 }, { 'Path': 'triggering_process_graph_id', 'NewKey': 'ProcessID', 'Delim': ':', 'Index': 2 }, ] ''' HELPER FUNCTIONS ''' def http_request(method, url_suffix, params=None, data=None, files=None, headers=HEADERS, safe=False, get_token_flag=True, no_json=False): """ A wrapper for requests lib to send our requests and handle requests and responses better. :type method: ``str`` :param method: HTTP method for the request. :type url_suffix: ``str`` :param url_suffix: The suffix of the URL (endpoint) :type params: ``dict`` :param params: The URL params to be passed. :type data: ``str`` :param data: The body data of the request. 
:type headers: ``dict`` :param headers: Request headers :type safe: ``bool`` :param safe: If set to true will return None in case of http error :type get_token_flag: ``bool`` :param get_token_flag: If set to True will call get_token() :type no_json: ``bool`` :param no_json: If set to true will not parse the content and will return the raw response object for successful response :return: Returns the http request response json :rtype: ``dict`` """ if get_token_flag: token = get_token() headers['Authorization'] = 'Bearer {}'.format(token) url = SERVER + url_suffix try: res = requests.request( method, url, verify=USE_SSL, params=params, data=data, headers=headers, files=files ) except requests.exceptions.RequestException: return_error('Error in connection to the server. Please make sure you entered the URL correctly.') try: if res.status_code not in {200, 201, 202, 204}: res_json = res.json() reason = res.reason resources = res_json.get('resources', {}) if resources: for host_id, resource in resources.items(): errors = resource.get('errors', []) if errors: error_message = errors[0].get('message') reason += f'\nHost ID {host_id} - {error_message}' elif res_json.get('errors'): errors = res_json.get('errors', []) for error in errors: reason += f"\n{error.get("message")}" err_msg = 'Error in API call to CrowdStrike Falcon: code: {code} - reason: {reason}'.format( code=res.status_code, reason=reason ) # try to create a new token if res.status_code == 403 and get_token_flag: LOG(err_msg) token = get_token(new_token=True) headers['Authorization'] = 'Bearer {}'.format(token) return http_request(method, url_suffix, params, data, headers, safe, get_token_flag=False) elif safe: return None return_error(err_msg) return res if no_json else res.json() except ValueError as exception: raise ValueError( f'Failed to parse json object from response: {exception} - {res.content}') # type: ignore[str-bytes-safe] def create_entry_object(contents: Union[List[Any], Dict[str, Any]] = {}, ec: 
Union[List[Any], Dict[str, Any]] = None, hr: str = ''): """ Creates an entry object :type contents: ``dict`` :param contents: Raw response to output :type ec: ``dict`` :param ec: Entry context of the entry object :type hr: ``str`` :param hr: Human readable :return: Entry object :rtype: ``dict`` """ return { 'Type': entryTypes['note'], 'Contents': contents, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': hr, 'EntryContext': ec } def detection_to_incident(detection): """ Creates an incident of a detection. :type detection: ``dict`` :param detection: Single detection object :return: Incident representation of a detection :rtype ``dict`` """ incident = { 'name': 'Detection ID: ' + str(detection.get('detection_id')), 'occurred': str(detection.get('created_timestamp')), 'rawJSON': json.dumps(detection), 'severity': severity_string_to_int(detection.get('max_severity_displayname')) } return incident def incident_to_incident_context(incident): """ Creates an incident context of a incident. 
:type incident: ``dict`` :param incident: Single detection object :return: Incident context representation of a incident :rtype ``dict`` """ incident_id = str(incident.get('incident_id')) incident_hosts = incident.get('hosts')[0] incident_context = { 'name': f'Incident ID: {incident_id}', 'occurred': incident_hosts.get('modified_timestamp'), 'rawJSON': json.dumps(incident) } return incident_context def severity_string_to_int(severity): """ Converts a severity string to DBot score representation :type severity: ``str`` :param severity: String representation of a severity :return: DBot score representation of the severity :rtype ``int`` """ if severity in ('Critical', 'High'): return 3 elif severity in ('Medium', 'Low'): return 2 return 0 def get_trasnformed_dict(old_dict, transformation_dict): """ Returns a dictionary with the same values as old_dict, with the correlating key:value in transformation_dict :type old_dict: ``dict`` :param old_dict: Old dictionary to pull values from :type transformation_dict: ``dict`` :param transformation_dict: Transformation dictionary that contains oldkeys:newkeys :return Transformed dictionart (according to transformation_dict values) :rtype ``dict`` """ new_dict = {} for k in list(old_dict.keys()): if k in transformation_dict: new_dict[transformation_dict[k]] = old_dict[k] return new_dict def extract_transformed_dict_with_split(old_dict, transformation_dict_arr): """ Extracts new values out of old_dict using a json structure of: {'Path': 'Path to item', 'NewKey': 'Value of output key', 'Delim': 'Delimiter char', 'Index': Split Array Index} """ new_dict = {} for trans_dict in transformation_dict_arr: try: val = demisto.get(old_dict, trans_dict['Path']) if 'split' in dir(val): i = trans_dict['Index'] new_dict[trans_dict['NewKey']] = val.split(trans_dict['Delim'])[i] except Exception as ex: LOG('Error {exception} with: {tdict}'.format(exception=ex, tdict=trans_dict)) return new_dict def get_passed_mins(start_time, end_time_str): """ 
Returns the time passed in mins :param start_time: Start time in datetime :param end_time_str: End time in str :return: The passed mins in int """ time_delta = start_time - datetime.fromtimestamp(end_time_str) return time_delta.seconds / 60 ''' COMMAND SPECIFIC FUNCTIONS ''' def init_rtr_single_session(host_id: str) -> str: """ Start a session with single host. :param host_id: Host agent ID to initialize a RTR session on. :return: The session ID to execute the command on """ endpoint_url = '/real-time-response/entities/sessions/v1' body = json.dumps({ 'device_id': host_id }) response = http_request('POST', endpoint_url, data=body) resources = response.get('resources') if resources and isinstance(resources, list) and isinstance(resources[0], dict): session_id = resources[0].get('session_id') if isinstance(session_id, str): return session_id raise ValueError('No session id found in the response') def init_rtr_batch_session(host_ids: list) -> str: """ Start a session with one or more hosts :param host_ids: List of host agent ID’s to initialize a RTR session on. :return: The session batch ID to execute the command on """ endpoint_url = '/real-time-response/combined/batch-init-session/v1' body = json.dumps({ 'host_ids': host_ids }) response = http_request('POST', endpoint_url, data=body) return response.get('batch_id') def refresh_session(host_id: str) -> Dict: """ Refresh a session timeout on a single host. :param host_id: Host agent ID to run RTR command on. :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/entities/refresh-session/v1' body = json.dumps({ 'device_id': host_id }) response = http_request('POST', endpoint_url, data=body) return response def run_batch_read_cmd(host_ids: list, command_type: str, full_command: str) -> Dict: """ Sends RTR command scope with read access :param host_ids: List of host agent ID’s to run RTR command on. 
:param command_type: Read-only command type we are going to execute, for example: ls or cd. :param full_command: Full command string for the command. :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/combined/batch-command/v1' batch_id = init_rtr_batch_session(host_ids) body = json.dumps({ 'base_command': command_type, 'batch_id': batch_id, 'command_string': full_command }) response = http_request('POST', endpoint_url, data=body) return response def run_batch_write_cmd(host_ids: list, command_type: str, full_command: str) -> Dict: """ Sends RTR command scope with write access :param host_ids: List of host agent ID’s to run RTR command on. :param command_type: Read-only command type we are going to execute, for example: ls or cd. :param full_command: Full command string for the command. :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/combined/batch-active-responder-command/v1' batch_id = init_rtr_batch_session(host_ids) body = json.dumps({ 'base_command': command_type, 'batch_id': batch_id, 'command_string': full_command }) response = http_request('POST', endpoint_url, data=body) return response def run_batch_admin_cmd(host_ids: list, command_type: str, full_command: str) -> Dict: """ Sends RTR command scope with write access :param host_ids: List of host agent ID’s to run RTR command on. :param command_type: Read-only command type we are going to execute, for example: ls or cd. :param full_command: Full command string for the command. 
:return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/combined/batch-admin-command/v1' batch_id = init_rtr_batch_session(host_ids) body = json.dumps({ 'base_command': command_type, 'batch_id': batch_id, 'command_string': full_command }) response = http_request('POST', endpoint_url, data=body) return response def run_batch_get_cmd(host_ids: list, file_path: str, optional_hosts: list = None, timeout: int = None, timeout_duration: str = None) -> Dict: """ Batch executes `get` command across hosts to retrieve files. After this call is made `/real-time-response/combined/batch-get-command/v1` is used to query for the results. :param host_ids: List of host agent ID’s to run RTR command on. :param file_path: Full path to the file that is to be retrieved from each host in the batch. :param optional_hosts: List of a subset of hosts we want to run the command on. If this list is supplied, only these hosts will receive the command. :param timeout: Timeout for how long to wait for the request in seconds :param timeout_duration: Timeout duration for for how long to wait for the request in duration syntax :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/combined/batch-get-command/v1' batch_id = init_rtr_batch_session(host_ids) body = assign_params(batch_id=batch_id, file_path=file_path, optional_hosts=optional_hosts) params = assign_params(timeout=timeout, timeout_duration=timeout_duration) response = http_request('POST', endpoint_url, data=json.dumps(body), params=params) return response def status_get_cmd(request_id: str, timeout: int = None, timeout_duration: str = None) -> Dict: """ Retrieves the status of the specified batch get command. Will return successful files when they are finished processing. :param request_id: ID to the request of `get` command. 
:param timeout: Timeout for how long to wait for the request in seconds :param timeout_duration: Timeout duration for for how long to wait for the request in duration syntax :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/combined/batch-get-command/v1' params = assign_params(timeout=timeout, timeout_duration=timeout_duration, batch_get_cmd_req_id=request_id) response = http_request('GET', endpoint_url, params=params) return response def run_single_read_cmd(host_id: str, command_type: str, full_command: str) -> Dict: """ Sends RTR command scope with read access :param host_id: Host agent ID to run RTR command on. :param command_type: Active-Responder command type we are going to execute, for example: get or cp. :param full_command: Full command string for the command. :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/entities/command/v1' session_id = init_rtr_single_session(host_id) body = json.dumps({ 'base_command': command_type, 'command_string': full_command, 'session_id': session_id }) response = http_request('POST', endpoint_url, data=body) return response def run_single_write_cmd(host_id: str, command_type: str, full_command: str) -> Dict: """ Sends RTR command scope with write access :param host_id: Host agent ID to run RTR command on. :param command_type: Active-Responder command type we are going to execute, for example: get or cp. :param full_command: Full command string for the command. 
:return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/entities/active-responder-command/v1' session_id = init_rtr_single_session(host_id) body = json.dumps({ 'base_command': command_type, 'command_string': full_command, 'session_id': session_id }) response = http_request('POST', endpoint_url, data=body) return response def run_single_admin_cmd(host_id: str, command_type: str, full_command: str) -> Dict: """ Sends RTR command scope with admin access :param host_id: Host agent ID to run RTR command on. :param command_type: Active-Responder command type we are going to execute, for example: get or cp. :param full_command: Full command string for the command. :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/entities/admin-command/v1' session_id = init_rtr_single_session(host_id) body = json.dumps({ 'base_command': command_type, 'command_string': full_command, 'session_id': session_id }) response = http_request('POST', endpoint_url, data=body) return response def status_read_cmd(request_id: str, sequence_id: Optional[int]) -> Dict: """ Get status of an executed command with read access on a single host. :param request_id: Cloud Request ID of the executed command to query :param sequence_id: Sequence ID that we want to retrieve. Command responses are chunked across sequences """ endpoint_url = '/real-time-response/entities/command/v1' params = { 'cloud_request_id': request_id, 'sequence_id': sequence_id or 0 } response = http_request('GET', endpoint_url, params=params) return response def status_write_cmd(request_id: str, sequence_id: Optional[int]) -> Dict: """ Get status of an executed command with write access on a single host. :param request_id: Cloud Request ID of the executed command to query :param sequence_id: Sequence ID that we want to retrieve. 
Command responses are chunked across sequences """ endpoint_url = '/real-time-response/entities/active-responder-command/v1' params = { 'cloud_request_id': request_id, 'sequence_id': sequence_id or 0 } response = http_request('GET', endpoint_url, params=params) return response def status_admin_cmd(request_id: str, sequence_id: Optional[int]) -> Dict: """ Get status of an executed command with admin access on a single host. :param request_id: Cloud Request ID of the executed command to query :param sequence_id: Sequence ID that we want to retrieve. Command responses are chunked across sequences """ endpoint_url = '/real-time-response/entities/admin-command/v1' params = { 'cloud_request_id': request_id, 'sequence_id': sequence_id or 0 } response = http_request('GET', endpoint_url, params=params) return response def list_host_files(host_id: str) -> Dict: """ Get a list of files for the specified RTR session on a host. :param host_id: Host agent ID to run RTR command on. :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/entities/file/v1' session_id = init_rtr_single_session(host_id) params = { 'session_id': session_id } response = http_request('GET', endpoint_url, params=params) return response def upload_script(name: str, permission_type: str, content: str, entry_id: str) -> Dict: """ Uploads a script by either given content or file :param name: Script name to upload :param permission_type: Permissions type of script to upload :param content: PowerShell script content :param entry_id: Script file to upload :return: Response JSON which contains errors (if exist) and how many resources were affected """ endpoint_url = '/real-time-response/entities/scripts/v1' body: Dict[str, Tuple[Any, Any]] = { 'name': (None, name), 'permission_type': (None, permission_type) } temp_file = None try: if content: body['content'] = (None, content) else: # entry_id was provided file_ = demisto.getFilePath(entry_id) 
file_name = file_.get('name') # pylint: disable=E1101 temp_file = open(file_.get('path'), 'rb') # pylint: disable=E1101 body['file'] = (file_name, temp_file) headers = { 'Authorization': HEADERS['Authorization'], 'Accept': 'application/json' } response = http_request('POST', endpoint_url, files=body, headers=headers) return response finally: if temp_file: temp_file.close() def get_script(script_id: list) -> Dict: """ Retrieves a script given its ID :param script_id: ID of script to get :return: Response JSON which contains errors (if exist) and retrieved resource """ endpoint_url = '/real-time-response/entities/scripts/v1' params = { 'ids': script_id } response = http_request('GET', endpoint_url, params=params) return response def delete_script(script_id: str) -> Dict: """ Deletes a script given its ID :param script_id: ID of script to delete :return: Response JSON which contains errors (if exist) and how many resources were affected """ endpoint_url = '/real-time-response/entities/scripts/v1' params = { 'ids': script_id } response = http_request('DELETE', endpoint_url, params=params) return response def list_scripts() -> Dict: """ Retrieves list of scripts :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/entities/scripts/v1' response = http_request('GET', endpoint_url) return response def get_extracted_file(host_id: str, sha256: str, filename: str = None): """ Get RTR extracted file contents for specified session and sha256. :param host_id: The host agent ID to initialize the RTR session on. :param sha256: Extracted SHA256 :param filename: Filename to use for the archive name and the file within the archive. 
""" endpoint_url = '/real-time-response/entities/extracted-file-contents/v1' session_id = init_rtr_single_session(host_id) params = { 'session_id': session_id, 'sha256': sha256 } if filename: params['filename'] = filename response = http_request('GET', endpoint_url, params=params, no_json=True) return response def upload_file(entry_id: str, description: str) -> Tuple: """ Uploads a file given entry ID :param entry_id: The entry ID of the file to upload :param description: String description of file to upload :return: Response JSON which contains errors (if exist) and how many resources were affected and the file name """ endpoint_url = '/real-time-response/entities/put-files/v1' temp_file = None try: file_ = demisto.getFilePath(entry_id) file_name = file_.get('name') # pylint: disable=E1101 temp_file = open(file_.get('path'), 'rb') # pylint: disable=E1101 body = { 'name': (None, file_name), 'description': (None, description), 'file': (file_name, temp_file) } headers = { 'Authorization': HEADERS['Authorization'], 'Accept': 'application/json' } response = http_request('POST', endpoint_url, files=body, headers=headers) return response, file_name finally: if temp_file: temp_file.close() def delete_file(file_id: str) -> Dict: """ Delete a put-file based on the ID given :param file_id: ID of file to delete :return: Response JSON which contains errors (if exist) and how many resources were affected """ endpoint_url = '/real-time-response/entities/put-files/v1' params = { 'ids': file_id } response = http_request('DELETE', endpoint_url, params=params) return response def get_file(file_id: list) -> Dict: """ Get put-files based on the ID's given :param file_id: ID of file to get :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/entities/put-files/v1' params = { 'ids': file_id } response = http_request('GET', endpoint_url, params=params) return response def list_files() -> Dict: """ Get a list of put-file 
ID's that are available to the user for the put command. :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/entities/put-files/v1' response = http_request('GET', endpoint_url) return response def get_token(new_token=False): """ Retrieves the token from the server if it's expired and updates the global HEADERS to include it :param new_token: If set to True will generate a new token regardless of time passed :rtype: ``str`` :return: Token """ now = datetime.now() ctx = demisto.getIntegrationContext() if ctx and not new_token: passed_mins = get_passed_mins(now, ctx.get('time')) if passed_mins >= TOKEN_LIFE_TIME: # token expired auth_token = get_token_request() demisto.setIntegrationContext({'auth_token': auth_token, 'time': date_to_timestamp(now) / 1000}) else: # token hasn't expired auth_token = ctx.get('auth_token') else: # there is no token auth_token = get_token_request() demisto.setIntegrationContext({'auth_token': auth_token, 'time': date_to_timestamp(now) / 1000}) return auth_token def get_token_request(): """ Sends token request :rtype ``str`` :return: Access token """ body = { 'client_id': CLIENT_ID, 'client_secret': SECRET } headers = { 'Authorization': HEADERS['Authorization'] } token_res = http_request('POST', '/oauth2/token', data=body, headers=headers, safe=True, get_token_flag=False) if not token_res: err_msg = 'Authorization Error: User has no authorization to create a token. Please make sure you entered the' \ ' credentials correctly.' raise Exception(err_msg) return token_res.get('access_token') def get_detections(last_behavior_time=None, behavior_id=None, filter_arg=None): """ Sends detections request. The function will ignore the arguments passed according to priority: filter_arg > behavior_id > last_behavior_time :param last_behavior_time: 3rd priority. The last behavior time of results will be greater than this value :param behavior_id: 2nd priority. 
The result will only contain the detections with matching behavior id :param filter_arg: 1st priority. The result will be filtered using this argument. :return: Response json of the get detection endpoint (IDs of the detections) """ endpoint_url = '/detects/queries/detects/v1' params = { 'sort': 'first_behavior.asc' } if filter_arg: params['filter'] = filter_arg elif behavior_id: params['filter'] = "behaviors.behavior_id:'{0}'".format(behavior_id) elif last_behavior_time: params['filter'] = "first_behavior:>'{0}'".format(last_behavior_time) response = http_request('GET', endpoint_url, params) return response def get_fetch_detections(last_created_timestamp=None, filter_arg=None, offset: int = 0): """ Sends detection request, based on the created_timestamp field. Used for fetch-incidents Args: last_created_timestamp: last created timestamp of the results will be greater than this value. filter_arg: The result will be filtered using this argument. Returns: Response json of the get detection endpoint (IDs of the detections) """ endpoint_url = '/detects/queries/detects/v1' params = { 'sort': 'first_behavior.asc', 'offset': offset, 'limit': INCIDENTS_PER_FETCH } if filter_arg: params['filter'] = filter_arg elif last_created_timestamp: params['filter'] = "created_timestamp:>'{0}'".format(last_created_timestamp) response = http_request('GET', endpoint_url, params) return response def get_detections_entities(detections_ids): """ Sends detection entities request :param detections_ids: IDs of the requested detections. 
        :return: Response json of the get detection entities endpoint (detection objects)
    """
    ids_json = {'ids': detections_ids}
    if detections_ids:
        response = http_request(
            'POST',
            '/detects/entities/summaries/GET/v1',
            data=json.dumps(ids_json)
        )
        return response
    # empty/falsy input - return it unchanged so callers can short-circuit
    return detections_ids


def get_incidents_ids(last_created_timestamp=None, filter_arg=None, offset: int = 0):
    # Queries incident IDs sorted by modification time; used by fetch-incidents.
    # filter_arg takes precedence over the timestamp filter.
    get_incidents_endpoint = '/incidents/queries/incidents/v1'
    params = {
        'sort': 'modified_timestamp.asc',
        'offset': offset,
        'limit': INCIDENTS_PER_FETCH
    }
    if filter_arg:
        params['filter'] = filter_arg
    elif last_created_timestamp:
        params['filter'] = "modified_timestamp:>'{0}'".format(last_created_timestamp)

    response = http_request('GET', get_incidents_endpoint, params)

    return response


def get_incidents_entities(incidents_ids):
    # Retrieves the full incident objects for the given list of incident IDs.
    ids_json = {'ids': incidents_ids}
    response = http_request(
        'POST',
        '/incidents/entities/incidents/GET/v1',
        data=json.dumps(ids_json)
    )
    return response


def create_ioc():
    """
        UNTESTED - Creates an IoC
        :return: Response json of create IoC request
    """
    args = demisto.args()
    input_args = {}
    # req args:
    input_args['type'] = args['ioc_type']
    input_args['value'] = args['ioc_value']
    input_args['policy'] = args['policy']
    # opt args:
    input_args['expiration_days'] = args.get('expiration_days')
    input_args['source'] = args.get('source')
    input_args['description'] = args.get('description')
    payload = {k: str(v) for k, v in input_args.items() if v}
    headers = {'Authorization': HEADERS['Authorization']}
    return http_request('POST', '/indicators/entities/iocs/v1', params=payload, headers=headers)


def search_iocs():
    """
        UNTESTED IN OAUTH 2- Searches an IoC
        :return: IoCs that were found in the search
    """
    args = demisto.args()
    ids = args.get('ids')
    if not ids:
        search_args = {
            'types': str(args.get('ioc_types', '')).split(','),
            'values': str(args.get('ioc_values', '')).split(','),
            'policies': str(args.get('policy', '')),
            'sources': str(args.get('sources', '')).split(','),
            'from.expiration_timestamp':
                str(args.get('expiration_from', '')),
            'to.expiration_timestamp': str(args.get('expiration_to', '')),
            'limit': str(args.get('limit', 50))
        }
        payload = {}
        # drop empty arguments; a list value counts as empty when its first element is ''
        for k, arg in search_args.items():
            if type(arg) is list:
                if arg[0]:
                    payload[k] = arg
            elif arg:
                payload[k] = arg
        ids = http_request('GET', '/indicators/queries/iocs/v1', payload).get('resources')
        if not ids:
            return None
    else:
        ids = str(ids)
    payload = {
        'ids': ids
    }
    return http_request('GET', '/indicators/entities/iocs/v1', params=payload)


def enrich_ioc_dict_with_ids(ioc_dict):
    """
        Enriches the provided ioc_dict with IoC ID
        :param ioc_dict: IoC dict transformed using the SEARCH_IOC_KEY_MAP
        :return: ioc_dict with its ID key:value updated
    """
    # ID is the "type:value" pair, which uniquely identifies a custom IoC
    for ioc in ioc_dict:
        ioc['ID'] = '{type}:{val}'.format(type=ioc.get('Type'), val=ioc.get('Value'))
    return ioc_dict


def delete_ioc():
    """
        UNTESTED - Sends a delete IoC request
        :return: Response json of delete IoC
    """
    ids = str(demisto.args().get('ids'))
    payload = {
        'ids': ids
    }
    return http_request('DELETE', '/indicators/entities/iocs/v1', payload)


def update_iocs():
    """
        UNTESTED - Updates the values of one or more IoC
        :return: Response json of update IoC request
    """
    args = demisto.args()
    input_args = {
        'ids': args.get('ids'),
        'policy': args.get('policy', ''),
        'expiration_days': args.get('expiration_days', ''),
        'source': args.get('source'),
        'description': args.get('description')
    }
    payload = {k: str(v) for k, v in input_args.items() if v}
    headers = {'Authorization': HEADERS['Authorization']}
    return http_request('PATCH', '/indicators/entities/iocs/v1', params=payload, headers=headers)


def search_device():
    """
        Searches for devices using the argument provided by the command execution.
        Returns empty result if no device was found
        :return: Search device response json
    """
    args = demisto.args()
    input_arg_dict = {
        'device_id': str(args.get('ids', '')).split(','),
        'status': str(args.get('status', '')).split(','),
        'hostname': str(args.get('hostname', '')).split(','),
        'platform_name': str(args.get('platform_name', '')).split(','),
        'site_name': str(args.get('site_name', '')).split(',')
    }
    url_filter = '{}'.format(str(args.get('filter', '')))
    # build the FQL filter: values of the same argument are comma-joined (OR),
    # different arguments are '+'-joined (AND)
    for k, arg in input_arg_dict.items():
        if arg:
            if type(arg) is list:
                arg_filter = ''
                for arg_elem in arg:
                    if arg_elem:
                        first_arg = '{filter},{inp_arg}'.format(filter=arg_filter, inp_arg=k) if arg_filter else k
                        arg_filter = "{first}:'{second}'".format(first=first_arg, second=arg_elem)
                if arg_filter:
                    url_filter = "{url_filter}{arg_filter}".format(url_filter=url_filter + '+' if url_filter else '',
                                                                   arg_filter=arg_filter)
            else:
                # All args should be a list. this is a fallback
                url_filter = "{url_filter}+{inp_arg}:'{arg_val}'".format(url_filter=url_filter, inp_arg=k, arg_val=arg)
    raw_res = http_request('GET', '/devices/queries/devices/v1', params={'filter': url_filter})
    device_ids = raw_res.get('resources')
    if not device_ids:
        return None
    return http_request('GET', '/devices/entities/devices/v1', params={'ids': device_ids})


def behavior_to_entry_context(behavior):
    """
        Transforms a behavior to entry context representation
        :param behavior: Behavior dict in the format of crowdstrike's API response
        :return: Behavior in entry context representation
    """
    raw_entry = get_trasnformed_dict(behavior, DETECTIONS_BEHAVIORS_KEY_MAP)
    raw_entry.update(extract_transformed_dict_with_split(behavior, DETECTIONS_BEHAVIORS_SPLIT_KEY_MAP))
    return raw_entry


def get_username_uuid(username: str):
    """
        Obtain CrowdStrike user's UUID by email.
        :param username: Username to get UUID of.
        :return: The user UUID
        :raises ValueError: when no user matches the given username
    """
    response = http_request('GET', '/users/queries/user-uuids-by-email/v1', params={'uid': username})
    resources: list = response.get('resources', [])
    if not resources:
        raise ValueError(f'User {username} was not found')
    return resources[0]


def resolve_detection(ids, status, assigned_to_uuid, show_in_ui, comment):
    """
        Sends a resolve detection request
        :param ids: Single or multiple ids in an array string format
        :param status: New status of the detection
        :param assigned_to_uuid: uuid to assign the detection to
        :param show_in_ui: Boolean flag in string format (true/false)
        :param comment: Optional comment to add to the detection
        :return: Resolve detection response json
    """
    payload = {
        'ids': ids
    }
    # only include fields the caller actually supplied
    if status:
        payload['status'] = status
    if assigned_to_uuid:
        payload['assigned_to_uuid'] = assigned_to_uuid
    if show_in_ui:
        payload['show_in_ui'] = show_in_ui
    if comment:
        payload['comment'] = comment
    # We do this so show_in_ui value won't contain "" (the API expects a JSON boolean, not a string)
    data = json.dumps(payload).replace('"show_in_ui": "false"', '"show_in_ui": false').replace('"show_in_ui": "true"',
                                                                                              '"show_in_ui": true')
    return http_request('PATCH', '/detects/entities/detects/v2', data=data)


def contain_host(ids):
    """
        Contains host(s) with matching ids
        :param ids: IDs of host to contain
        :return: Contain host response json
    """
    payload = {
        'ids': ids
    }
    data = json.dumps(payload)
    params = {
        'action_name': 'contain'
    }
    return http_request('POST', '/devices/entities/devices-actions/v2', data=data, params=params)


def lift_host_containment(ids):
    """
        Lifts off containment from host(s) with matching ids
        :param ids: IDs of host to lift off containment from
        :return: Lift off containment response json
    """
    payload = {
        'ids': ids
    }
    data = json.dumps(payload)
    params = {
        'action_name': 'lift_containment'
    }
    return http_request('POST', '/devices/entities/devices-actions/v2', data=data, params=params)


def timestamp_length_equalization(timestamp1, timestamp2):
    """
        Makes sure the timestamps are of the same length.
    Args:
        timestamp1: First timestamp to compare.
        timestamp2: Second timestamp to compare.
    Returns:
        the two timestamps in the same length (the longer one)
    """
    diff_len = len(str(timestamp1)) - len(str(timestamp2))

    # no difference in length
    if diff_len == 0:
        return int(timestamp1), int(timestamp2)

    # length of timestamp1 > timestamp2 - pad timestamp2 by powers of ten
    if diff_len > 0:
        ten_times = pow(10, diff_len)
        timestamp2 = int(timestamp2) * ten_times

    # length of timestamp2 > timestamp1 - pad timestamp1 by powers of ten
    else:
        ten_times = pow(10, diff_len * -1)
        timestamp1 = int(timestamp1) * ten_times

    return int(timestamp1), int(timestamp2)


''' COMMANDS FUNCTIONS '''


def get_fetch_times_and_offset(incident_type):
    # Reads the per-type fetch state from the last run: the last fetch time,
    # the pagination offset, the previous fetch time (kept so the offset can
    # continue from the same window) and the fetch time in epoch milliseconds.
    last_run = demisto.getLastRun()
    last_fetch_time = last_run.get(f'first_behavior_{incident_type}_time')
    offset = last_run.get(f'{incident_type}_offset', 0)
    if not last_fetch_time:
        # first run - fall back to the configured FETCH_TIME look-back
        last_fetch_time, _ = parse_date_range(FETCH_TIME, date_format='%Y-%m-%dT%H:%M:%SZ')
    prev_fetch = last_fetch_time
    last_fetch_timestamp = int(parse(last_fetch_time).timestamp() * 1000)
    return last_fetch_time, offset, prev_fetch, last_fetch_timestamp


def fetch_incidents():
    """
        Fetches incident using the detections API
        :return: Fetched detections in incident format
    """
    incidents = []  # type:List
    fetch_incidents_or_detections = demisto.params().get('fetch_incidents_or_detections')

    if 'Detections' in fetch_incidents_or_detections:
        incident_type = 'detection'
        last_fetch_time, offset, prev_fetch, last_fetch_timestamp = get_fetch_times_and_offset(incident_type)

        fetch_query = demisto.params().get('fetch_query')
        if fetch_query:
            fetch_query = "created_timestamp:>'{time}'+{query}".format(time=last_fetch_time, query=fetch_query)
            detections_ids = demisto.get(get_fetch_detections(filter_arg=fetch_query, offset=offset), 'resources')
        else:
            detections_ids = demisto.get(get_fetch_detections(last_created_timestamp=last_fetch_time, offset=offset),
                                         'resources')
        if detections_ids:
            raw_res = get_detections_entities(detections_ids)
            if "resources" in raw_res:
                raw_res['type'] =
"detections" for detection in demisto.get(raw_res, "resources"): incident = detection_to_incident(detection) incident_date = incident['occurred'] incident_date_timestamp = int(parse(incident_date).timestamp() * 1000) # make sure that the two timestamps are in the same length if len(str(incident_date_timestamp)) != len(str(last_fetch_timestamp)): incident_date_timestamp, last_fetch_timestamp = timestamp_length_equalization( incident_date_timestamp, last_fetch_timestamp) # Update last run and add incident if the incident is newer than last fetch if incident_date_timestamp > last_fetch_timestamp: last_fetch_time = incident_date last_fetch_timestamp = incident_date_timestamp incidents.append(incident) if len(incidents) == INCIDENTS_PER_FETCH: demisto.setLastRun({'first_behavior_detection_time': prev_fetch, 'detection_offset': offset + INCIDENTS_PER_FETCH}) else: demisto.setLastRun({'first_behavior_detection_time': last_fetch_time}) if 'Incidents' in fetch_incidents_or_detections: incident_type = 'incident' last_fetch_time, offset, prev_fetch, last_fetch_timestamp = get_fetch_times_and_offset(incident_type) fetch_query = demisto.params().get('fetch_query') if fetch_query: fetch_query = "modified_timestamp:>'{time}'+{query}".format(time=last_fetch_time, query=fetch_query) incidents_ids = demisto.get(get_incidents_ids(filter_arg=fetch_query, offset=offset), 'resources') else: incidents_ids = demisto.get(get_incidents_ids(last_created_timestamp=last_fetch_time, offset=offset), 'resources') if incidents_ids: raw_res = get_incidents_entities(incidents_ids) if "resources" in raw_res: raw_res['type'] = "incidents" for incident in demisto.get(raw_res, "resources"): incident_to_context = incident_to_incident_context(incident) incident_date = incident_to_context['occurred'] incident_date_timestamp = int(parse(incident_date).timestamp() * 1000) # make sure that the two timestamps are in the same length if len(str(incident_date_timestamp)) != len(str(last_fetch_timestamp)): 
incident_date_timestamp, last_fetch_timestamp = timestamp_length_equalization( incident_date_timestamp, last_fetch_timestamp) # Update last run and add incident if the incident is newer than last fetch if incident_date_timestamp > last_fetch_timestamp: last_fetch_time = incident_date last_fetch_timestamp = incident_date_timestamp incidents.append(incident_to_context) if len(incidents) == INCIDENTS_PER_FETCH: demisto.setLastRun({'first_behavior_incident_time': prev_fetch, 'incident_offset': offset + INCIDENTS_PER_FETCH}) else: demisto.setLastRun({'first_behavior_incident_time': last_fetch_time}) return incidents def create_ioc_command(): """ UNTESTED - Creates an IoC :return: EntryObject of create IoC command """ raw_res = create_ioc() return create_entry_object(contents=raw_res, hr="Custom IoC was created successfully.") def search_iocs_command(): """ UNTESTED IN OAUTH 2 - Searches for an ioc :return: EntryObject of search IoC command """ raw_res = search_iocs() if not raw_res: return create_entry_object(hr='Could not find any Indicators of Compromise.') iocs = raw_res.get('resources') ec = [get_trasnformed_dict(ioc, SEARCH_IOC_KEY_MAP) for ioc in iocs] enrich_ioc_dict_with_ids(ec) return create_entry_object(contents=raw_res, ec={'CrowdStrike.IoC(val.ID === obj.ID)': ec}, hr=tableToMarkdown('Indicators of Compromise', ec)) def delete_iocs_command(): """ UNTESTED - Deletes an IoC :return: EntryObject of delete IoC command """ raw_res = delete_ioc() ids = demisto.args().get('ids') return create_entry_object(contents=raw_res, hr="Custom IoC {0} successfully deleted.".format(ids)) def update_iocs_command(): """ UNTESTED - Updates an IoC :return: EntryObject of update IoC command """ raw_res = update_iocs() ids = demisto.args().get('ids') return create_entry_object(contents=raw_res, hr="Custom IoC {0} successfully updated.".format(ids)) def search_device_command(): """ Searches for a device :return: EntryObject of search device command """ raw_res = search_device() if 
 not raw_res:
        return create_entry_object(hr='Could not find any devices.')
    devices = raw_res.get('resources')

    entries = [get_trasnformed_dict(device, SEARCH_DEVICE_KEY_MAP) for device in devices]
    headers = ['ID', 'Hostname', 'OS', 'MacAddress', 'LocalIP', 'ExternalIP', 'FirstSeen', 'LastSeen']
    hr = tableToMarkdown('Devices', entries, headers=headers, headerTransform=pascalToSpace)
    ec = {'CrowdStrike.Device(val.ID === obj.ID)': entries}
    return create_entry_object(contents=raw_res, ec=ec, hr=hr)


def get_behavior_command():
    """
        Gets a behavior by ID
        :return: EntryObject of get behavior command
    """
    behavior_id = demisto.args().get('behavior_id')
    detections_ids = demisto.get(get_detections(behavior_id=behavior_id), 'resources')
    raw_res = get_detections_entities(detections_ids)
    entries = []
    if "resources" in raw_res:
        for resource in demisto.get(raw_res, "resources"):
            for behavior in demisto.get(resource, 'behaviors'):
                entries.append(behavior_to_entry_context(behavior))
    hr = tableToMarkdown('Behavior ID: {}'.format(behavior_id), entries, headerTransform=pascalToSpace)
    # no dt since behavior vary by more than their ID
    ec = {'CrowdStrike.Behavior': entries}
    return create_entry_object(contents=raw_res, ec=ec, hr=hr)


def search_detections_command():
    """
        Searches for a detection
        :return: EntryObject of search detections command
    """
    d_args = demisto.args()
    detections_ids = argToList(d_args.get('ids'))
    if not detections_ids:
        # no explicit IDs - a filter argument is mandatory to run the search
        filter_arg = d_args.get('filter')
        if not filter_arg:
            return_error('Command Error: Please provide at least one argument.')
        detections_ids = get_detections(filter_arg=filter_arg).get('resources')
    raw_res = get_detections_entities(detections_ids)
    entries = []
    headers = ['ID', 'Status', 'System', 'ProcessStartTime', 'CustomerID', 'MaxSeverity']
    if "resources" in raw_res:
        for detection in demisto.get(raw_res, "resources"):
            detection_entry = {}
            for path, new_key in DETECTIONS_BASE_KEY_MAP.items():
                detection_entry[new_key] = demisto.get(detection, path)
            behaviors =
                []
            for behavior in demisto.get(detection, 'behaviors'):
                behaviors.append(behavior_to_entry_context(behavior))
            detection_entry['Behavior'] = behaviors
            entries.append(detection_entry)
    hr = tableToMarkdown('Detections Found:', entries, headers=headers, removeNull=True,
                         headerTransform=pascalToSpace)
    ec = {'CrowdStrike.Detection(val.ID === obj.ID)': entries}
    return create_entry_object(contents=raw_res, ec=ec, hr=hr)


def resolve_detection_command():
    """
        Resolves single or multiple detections
        :return: EntryObject of resolve detection command
    """
    args = demisto.args()
    ids = argToList(args.get('ids'))

    username = args.get('username')
    assigned_to_uuid = args.get('assigned_to_uuid')
    comment = args.get('comment')
    # username and assigned_to_uuid are mutually exclusive ways to pick an assignee
    if username and assigned_to_uuid:
        raise ValueError('Only one of the arguments assigned_to_uuid or username should be provided, not both.')
    if username:
        assigned_to_uuid = get_username_uuid(username)

    status = args.get('status')
    show_in_ui = args.get('show_in_ui')
    raw_res = resolve_detection(ids, status, assigned_to_uuid, show_in_ui, comment)
    args.pop('ids')
    hr = "Detection {0} updated\n".format(str(ids)[1:-1])
    hr += 'With the following values:\n'
    for k, arg in args.items():
        hr += '\t{name}:{val}\n'.format(name=k, val=arg)
    return create_entry_object(contents=raw_res, hr=hr)


def contain_host_command():
    """
        Contains hosts with user arg ids
        :return: EntryObject of contain host command
    """
    ids = argToList(demisto.args().get('ids'))
    raw_res = contain_host(ids)
    hr = "Host {} contained".format(str(ids)[1:-1])
    return create_entry_object(contents=raw_res, hr=hr)


def lift_host_containment_command():
    """
        Lifts containment off a host
        :return: EntryObject of lift host containment
    """
    ids = argToList(demisto.args().get('ids'))
    raw_res = lift_host_containment(ids)
    hr = "Containment has been lift off host {}".format(str(ids)[1:-1])
    return create_entry_object(contents=raw_res, hr=hr)


def run_command():
    # Executes a real-time-response command on one or more hosts, either in
    # batch mode or per-host ("single") mode, with read/write/admin scope.
    args = demisto.args()
    host_ids = argToList(args.get('host_ids'))
    command_type =
                   args.get('command_type')
    full_command = args.get('full_command')
    scope = args.get('scope', 'read')
    target = args.get('target', 'batch')

    output = []

    if target == 'batch':
        # one batch request covering all hosts
        if scope == 'read':
            response = run_batch_read_cmd(host_ids, command_type, full_command)
        elif scope == 'write':
            response = run_batch_write_cmd(host_ids, command_type, full_command)
        else:  # scope = admin
            response = run_batch_admin_cmd(host_ids, command_type, full_command)

        resources: dict = response.get('combined', {}).get('resources', {})

        for _, resource in resources.items():
            errors = resource.get('errors', [])
            if errors:
                error_message = errors[0].get('message', '')
                if not error_message:
                    error_message = f'Could not run command\n{errors}'
                return_error(error_message)
            output.append({
                'HostID': resource.get('aid'),
                'SessionID': resource.get('session_id'),
                'Stdout': resource.get('stdout'),
                'Stderr': resource.get('stderr'),
                'BaseCommand': resource.get('base_command'),
                'Command': full_command
            })

        human_readable = tableToMarkdown(f'Command {full_command} results', output, removeNull=True)
        entry_context_batch = {
            'CrowdStrike': {
                'Command': output
            }
        }
        return create_entry_object(contents=response, ec=entry_context_batch, hr=human_readable)
    else:  # target = 'single'
        responses = []
        # one request per host; results are polled later via the returned TaskID
        for host_id in host_ids:
            if scope == 'read':
                response1 = run_single_read_cmd(host_id, command_type, full_command)
            elif scope == 'write':
                response1 = run_single_write_cmd(host_id, command_type, full_command)
            else:  # scope = admin
                response1 = run_single_admin_cmd(host_id, command_type, full_command)
            responses.append(response1)

            for resource in response1.get('resources', []):
                errors = resource.get('errors', [])
                if errors:
                    error_message = errors[0].get('message', '')
                    if not error_message:
                        error_message = f'Could not run command\n{errors}'
                    return_error(error_message)
                output.append({
                    'HostID': host_id,
                    'TaskID': resource.get('cloud_request_id'),
                    'SessionID': resource.get('session_id'),
                    'BaseCommand': command_type,
                    'Command': full_command,
'Complete': False, 'NextSequenceID': 0 }) human_readable = tableToMarkdown(f'Command {full_command} results', output, removeNull=True) entry_context_single = { 'CrowdStrike.Command(val.TaskID === obj.TaskID)': output } return create_entry_object(contents=responses, ec=entry_context_single, hr=human_readable) def upload_script_command(): args = demisto.args() name = args.get('name') permission_type = args.get('permission_type', 'private') content = args.get('content') entry_id = args.get('entry_id') if content and entry_id: raise ValueError('Only one of the arguments entry_id or content should be provided, not both.') elif not content and not entry_id: raise ValueError('One of the arguments entry_id or content must be provided, none given.') response = upload_script(name, permission_type, content, entry_id) return create_entry_object(contents=response, hr='The script was uploaded successfully') def get_script_command(): script_id = argToList(demisto.args().get('script_id')) response = get_script(script_id) resources: list = response.get('resources', []) if resources and isinstance(resources, list): resource = resources[0] script = { 'ID': resource.get('id'), 'CreatedBy': resource.get('created_by'), 'CreatedTime': resource.get('created_timestamp'), 'Description': resource.get('description'), 'ModifiedBy': resource.get('modified_by'), 'ModifiedTime': resource.get('modified_timestamp'), 'Name': resource.get('name'), 'Permission': resource.get('permission_type'), 'SHA256': resource.get('sha256'), 'RunAttemptCount': resource.get('run_attempt_count'), 'RunSuccessCount': resource.get('run_success_count'), 'WriteAccess': resource.get('write_access') } human_readable = tableToMarkdown(f'CrowdStrike Falcon script {script_id}', script) entry_context = { 'CrowdStrike.Script(val.ID === obj.ID)': script } script_content = resource.get('content') if script_content: demisto.results( fileResult( f"{resource.get("name", "script")}.ps1", script_content ) ) return 
 create_entry_object(contents=response, ec=entry_context, hr=human_readable)
    else:
        return 'No script found.'


def delete_script_command():
    # Deletes a single script by ID and reports success in the war room.
    script_id = demisto.args().get('script_id')

    response = delete_script(script_id)

    return create_entry_object(contents=response, hr=f'Script {script_id} was deleted successfully')


def list_scripts_command():
    # Lists all scripts available to the user with their metadata.
    response = list_scripts()

    resources: list = response.get('resources', [])

    scripts = []
    for resource in resources:
        scripts.append({
            'ID': resource.get('id'),
            'CreatedBy': resource.get('created_by'),
            'CreatedTime': resource.get('created_timestamp'),
            'Description': resource.get('description'),
            'ModifiedBy': resource.get('modified_by'),
            'ModifiedTime': resource.get('modified_timestamp'),
            'Name': resource.get('name'),
            'Permission': resource.get('permission_type'),
            'SHA256': resource.get('sha256'),
            'RunAttemptCount': resource.get('run_attempt_count'),
            'RunSuccessCount': resource.get('run_success_count'),
            'Platform': resource.get('platform'),
            'WriteAccess': resource.get('write_access')
        })

    human_readable = tableToMarkdown('CrowdStrike Falcon scripts', scripts)

    entry_context = {
        'CrowdStrike.Script(val.ID === obj.ID)': scripts
    }

    return create_entry_object(contents=response, ec=entry_context, hr=human_readable)


def upload_file_command():
    # Uploads a war-room file entry as a put-file to CrowdStrike Falcon.
    entry_id = demisto.args().get('entry_id')
    description = demisto.args().get('description', 'File uploaded from Demisto')

    # file_name is returned by upload_file but intentionally not shown in the HR message
    response, file_name = upload_file(entry_id, description)

    return create_entry_object(contents=response, hr='File was uploaded successfully')


def delete_file_command():
    # Deletes a put-file by ID.
    file_id = demisto.args().get('file_id')

    response = delete_file(file_id)

    return create_entry_object(contents=response, hr=f'File {file_id} was deleted successfully')


def get_file_command():
    # Retrieves a put-file's metadata and, when content is present, returns it
    # as a war-room file.
    file_id = argToList(demisto.args().get('file_id'))

    response = get_file(file_id)

    resources: list = response.get('resources', [])
    if resources and isinstance(resources, list):  # will always be a list of one resource
        resource = resources[0]
        file_ = {
            'ID': resource.get('id'),
            'CreatedBy': resource.get('created_by'),
            'CreatedTime': resource.get('created_timestamp'),
            'Description': resource.get('description'),
            'Type': resource.get('file_type'),
            'ModifiedBy': resource.get('modified_by'),
            'ModifiedTime': resource.get('modified_timestamp'),
            'Name': resource.get('name'),
            'Permission': resource.get('permission_type'),
            'SHA256': resource.get('sha256'),
        }
        # standard-context File object alongside the integration-specific one
        file_standard_context = {
            'Type': resource.get('file_type'),
            'Name': resource.get('name'),
            'SHA256': resource.get('sha256'),
            'Size': resource.get('size'),
        }

        human_readable = tableToMarkdown(f'CrowdStrike Falcon file {file_id}', file_)

        entry_context = {
            'CrowdStrike.File(val.ID === obj.ID)': file_,
            outputPaths['file']: file_standard_context
        }

        file_content = resource.get('content')
        if file_content:
            demisto.results(
                fileResult(
                    resource.get('name'),
                    file_content
                )
            )

        return create_entry_object(contents=response, ec=entry_context, hr=human_readable)
    else:
        return 'No file found.'


def list_files_command():
    # Lists all put-files available to the user, producing both the
    # integration context and the standard File context.
    response = list_files()

    resources: list = response.get('resources', [])

    files_output = []
    file_standard_context = []
    for resource in resources:
        files_output.append({
            'ID': resource.get('id'),
            'CreatedBy': resource.get('created_by'),
            'CreatedTime': resource.get('created_timestamp'),
            'Description': resource.get('description'),
            'Type': resource.get('file_type'),
            'ModifiedBy': resource.get('modified_by'),
            'ModifiedTime': resource.get('modified_timestamp'),
            'Name': resource.get('name'),
            'Permission': resource.get('permission_type'),
            'SHA256': resource.get('sha256'),
        })
        file_standard_context.append({
            'Type': resource.get('file_type'),
            'Name': resource.get('name'),
            'SHA256': resource.get('sha256'),
            'Size': resource.get('size'),
        })

    human_readable = tableToMarkdown('CrowdStrike Falcon files', files_output)

    entry_context = {
        'CrowdStrike.File(val.ID === obj.ID)': files_output,
        outputPaths['file']: file_standard_context
    }

    return create_entry_object(contents=response,
 ec=entry_context, hr=human_readable)


def run_script_command():
    """
        Runs a script on one or more hosts, either a previously uploaded CloudFile
        (script_name) or inline raw content (raw). The arguments are mutually
        exclusive and one is required.
        :return: EntryObject with per-host stdout/stderr
        :raises ValueError: when both or neither of script_name/raw are given
    """
    args = demisto.args()
    script_name = args.get('script_name')
    raw = args.get('raw')
    host_ids = argToList(args.get('host_ids'))

    if script_name and raw:
        raise ValueError('Only one of the arguments script_name or raw should be provided, not both.')
    elif not script_name and not raw:
        raise ValueError('One of the arguments script_name or raw must be provided, none given.')
    elif script_name:
        full_command = f'runscript -CloudFile={script_name}'
    elif raw:
        full_command = f'runscript -Raw=```{raw}```'

    command_type = 'runscript'

    response = run_batch_admin_cmd(host_ids, command_type, full_command)

    resources: dict = response.get('combined', {}).get('resources', {})

    output = []

    for _, resource in resources.items():
        errors = resource.get('errors', [])
        if errors:
            error_message = errors[0].get('message', '')
            if not error_message:
                error_message = f'Could not run command\n{errors}'
            return_error(error_message)
        # strip the backticks added around raw content before displaying the command
        full_command = full_command.replace('`', '')
        output.append({
            'HostID': resource.get('aid'),
            'SessionID': resource.get('session_id'),
            'Stdout': resource.get('stdout'),
            'Stderr': resource.get('stderr'),
            'BaseCommand': resource.get('base_command'),
            'Command': full_command
        })

    human_readable = tableToMarkdown(f'Command {full_command} results', output)
    entry_context = {
        'CrowdStrike': {
            'Command': output
        }
    }
    return create_entry_object(contents=response, ec=entry_context, hr=human_readable)


def run_get_command():
    # Requests a file (get) from one or more hosts via real-time-response batch.
    args = demisto.args()
    host_ids = argToList(args.get('host_ids'))
    file_path = args.get('file_path')
    optional_hosts = argToList(args.get('optional_hosts'))
    timeout = args.get('timeout')
    timeout_duration = args.get('timeout_duration')

    # convert to int only when supplied; None otherwise
    timeout = timeout and int(timeout)
    response = run_batch_get_cmd(host_ids, file_path, optional_hosts, timeout, timeout_duration)

    resources: dict = response.get('combined', {}).get('resources', {})

    output = []

    for _, resource in resources.items():
        errors = resource.get('errors', [])
        if
 errors:
            error_message = errors[0].get('message', '')
            if not error_message:
                error_message = f'Could not get command\n{errors}'
            return_error(error_message)
        output.append({
            'HostID': resource.get('aid'),
            'Stdout': resource.get('stdout'),
            'Stderr': resource.get('stderr'),
            'BaseCommand': resource.get('base_command'),
            'TaskID': resource.get('task_id'),
            'GetRequestID': response.get('batch_get_cmd_req_id'),
            'Complete': resource.get('complete') or False,
            'FilePath': file_path
        })

    human_readable = tableToMarkdown(f'Get command has requested for a file {file_path}', output)
    entry_context = {
        'CrowdStrike.Command(val.TaskID === obj.TaskID)': output
    }
    return create_entry_object(contents=response, ec=entry_context, hr=human_readable)


def status_get_command():
    # Polls the status of one or more batch-get requests and lists the files
    # retrieved so far (both integration context and standard File context).
    args = demisto.args()
    request_ids = argToList(args.get('request_ids'))
    timeout = args.get('timeout')
    timeout_duration = args.get('timeout_duration')

    timeout = timeout and int(timeout)

    responses = []
    files_output = []
    file_standard_context = []

    for request_id in request_ids:
        response = status_get_cmd(request_id, timeout, timeout_duration)
        responses.append(response)

        resources: dict = response.get('resources', {})

        for _, resource in resources.items():
            errors = resource.get('errors', [])
            if errors:
                error_message = errors[0].get('message', '')
                if not error_message:
                    error_message = f'Could not get command\n{errors}'
                return_error(error_message)
            files_output.append({
                'ID': resource.get('id'),
                'TaskID': resource.get('cloud_request_id'),
                'CreatedAt': resource.get('created_at'),
                'DeletedAt': resource.get('deleted_at'),
                'UpdatedAt': resource.get('updated_at'),
                'Name': resource.get('name'),
                'Size': resource.get('size'),
                'SHA256': resource.get('sha256')
            })
            file_standard_context.append({
                'Name': resource.get('name'),
                'SHA256': resource.get('sha256'),
                'Size': resource.get('size'),
            })

    human_readable = tableToMarkdown('CrowdStrike Falcon files', files_output)
    entry_context = {
        'CrowdStrike.File(val.ID === obj.ID || val.TaskID ===
 obj.TaskID)': files_output,
        outputPaths['file']: file_standard_context
    }
    if len(responses) == 1:
        # single request - return its raw response as contents
        return create_entry_object(contents=responses[0], ec=entry_context, hr=human_readable)
    else:
        # multiple requests - contents is the last response; context holds all files
        return create_entry_object(contents=response, ec=entry_context, hr=human_readable)


def status_command():
    # Polls the status of a single real-time-response command execution,
    # with read/write/admin scope, returning per-sequence output.
    args = demisto.args()
    request_id = args.get('request_id')
    sequence_id = args.get('sequence_id')
    scope = args.get('scope', 'read')

    sequence_id = None if sequence_id is None else int(sequence_id)

    if scope == 'read':
        response = status_read_cmd(request_id, sequence_id)
    elif scope == 'write':
        response = status_write_cmd(request_id, sequence_id)
    else:  # scope = admin
        response = status_admin_cmd(request_id, sequence_id)

    resources: list = response.get('resources', [])

    output = []

    for resource in resources:
        errors = resource.get('errors', [])
        if errors:
            error_message = errors[0].get('message', '')
            if not error_message:
                error_message = f'Could not run command\n{errors}'
            return_error(error_message)
        sequence_id = int(resource.get('sequence_id', 0))
        output.append({
            'Complete': resource.get('complete') or False,
            'Stdout': resource.get('stdout'),
            'Stderr': resource.get('stderr'),
            'BaseCommand': resource.get('base_command'),
            'TaskID': resource.get('task_id'),
            'SequenceID': sequence_id,
            # NextSequenceID lets the caller page through long-running output
            'NextSequenceID': sequence_id + 1
        })

    human_readable = tableToMarkdown('Command status results', output, removeNull=True)
    entry_context = {
        'CrowdStrike.Command(val.TaskID === obj.TaskID)': output
    }
    return create_entry_object(contents=response, ec=entry_context, hr=human_readable)


def get_extracted_file_command():
    # Downloads a file extracted by a previous 'get' request and returns it to
    # the war room as a 7z archive.
    args = demisto.args()
    host_id = args.get('host_id')
    sha256 = args.get('sha256')
    filename = args.get('filename')

    response = get_extracted_file(host_id, sha256, filename)

    # save an extracted file
    content_type = response.headers.get('Content-Type', '').lower()
    if content_type == 'application/x-7z-compressed':
        content_disposition = response.headers.get('Content-Disposition', '').lower()
        if
 content_disposition:
            # prefer the server-provided filename from the Content-Disposition header
            filename = email.message_from_string(f'Content-Disposition: {content_disposition}\n\n').get_filename()

        if not filename:
            # fall back to the content hash as a filename
            sha256 = sha256 or hashlib.sha256(response.content).hexdigest()
            filename = sha256.lower() + '.7z'
        return fileResult(filename, response.content)

    return_error('An extracted file is missing in the response')


def list_host_files_command():
    # Lists the files collected from a host's active real-time-response session,
    # producing command, integration-file and standard File context.
    args = demisto.args()
    host_id = args.get('host_id')

    response = list_host_files(host_id)
    resources: list = response.get('resources', [])

    files_output = []
    file_standard_context = []
    command_output = []

    for resource in resources:
        errors = resource.get('errors', [])
        if errors:
            error_message = errors[0].get('message', '')
            if not error_message:
                error_message = f'Could not run command\n{errors}'
            return_error(error_message)
        command_output.append({
            'HostID': host_id,
            'TaskID': resource.get('cloud_request_id'),
            'SessionID': resource.get('session_id')
        })
        files_output.append({
            'ID': resource.get('id'),
            'CreatedAt': resource.get('created_at'),
            'DeletedAt': resource.get('deleted_at'),
            'UpdatedAt': resource.get('updated_at'),
            'Name': resource.get('name'),
            'SHA256': resource.get('sha256'),
            'Size': resource.get('size'),
            'Stdout': resource.get('stdout'),
            'Stderr': resource.get('stderr')
        })
        file_standard_context.append({
            'Name': resource.get('name'),
            'SHA256': resource.get('sha256'),
            'Size': resource.get('size'),
        })

    if files_output:
        human_readable = tableToMarkdown('CrowdStrike Falcon files', files_output)
    else:
        human_readable = 'No result found'

    entry_context = {
        'CrowdStrike.Command(val.TaskID === obj.TaskID)': command_output,
        'CrowdStrike.File(val.ID === obj.ID)': files_output,
        outputPaths['file']: file_standard_context
    }

    return create_entry_object(contents=response, ec=entry_context, hr=human_readable)


def refresh_session_command():
    # Extends the lifetime of a host's real-time-response session.
    args = demisto.args()
    host_id = args.get('host_id')

    response = refresh_session(host_id)
    resources: list = response.get('resources', [])

    session_id = None
    for resource in
resources: errors = resource.get('errors', []) if errors: error_message = errors[0].get('message', '') if not error_message: error_message = f'Could not run command\n{errors}' return_error(error_message) session_id = resource.get('session_id') return create_entry_object(contents=response, hr=f'CrowdStrike Session Refreshed: {session_id}') def build_url_filter_for_device_id(args): indicator_type = args.get('type') indicator_value = args.get('value') url_filter = f'/indicators/queries/devices/v1?type={indicator_type}&value={indicator_value}' return url_filter def build_error_message(raw_res): if raw_res.get('errors'): error_data = raw_res.get('errors')[0] else: error_data = {"code": 'None', "message": 'something got wrong, please try again'} error_code = error_data.get('code') error_message = error_data.get('message') return f'Error: error code: {error_code}, error_message: {error_message}.' def validate_response(raw_res): return 'resources' in raw_res.keys() def get_indicator_device_id(): args = demisto.args() url_filter = build_url_filter_for_device_id(args) raw_res = http_request('GET', url_filter) context_output = '' if validate_response(raw_res): context_output = raw_res.get('resources') else: error_message = build_error_message(raw_res) return_error(error_message) return CommandResults( readable_output=context_output, outputs_prefix='CrowdStrike.DeviceID', outputs_key_field='DeviceID', outputs=context_output ) def detections_to_human_readable(detections): detections_readable_outputs = [] for detection in detections: readable_output = assign_params(status=detection.get('status'), max_severity=detection.get('max_severity_displayname'), detection_id=detection.get('detection_id'), created_time=detection.get('created_timestamp')) detections_readable_outputs.append(readable_output) headers = ['detection_id', 'created_time', 'status', 'max_severity'] human_readable = tableToMarkdown('CrowdStrike Detections', detections_readable_outputs, headers, removeNull=True) 
return human_readable def list_detection_summaries_command(): fetch_query = demisto.args().get('fetch_query') if fetch_query: fetch_query = "{query}".format(query=fetch_query) detections_ids = demisto.get(get_fetch_detections(filter_arg=fetch_query), 'resources') else: detections_ids = demisto.get(get_fetch_detections(), 'resources') detections_response_data = get_detections_entities(detections_ids) detections = [resource for resource in detections_response_data.get('resources')] detections_human_readable = detections_to_human_readable(detections) return CommandResults( readable_output=detections_human_readable, outputs_prefix='CrowdStrike.Detections', outputs_key_field='detection_id', outputs=detections ) def incidents_to_human_readable(incidents): incidents_readable_outputs = [] for incident in incidents: readable_output = assign_params(description=incident.get('description'), state=incident.get('state'), name=incident.get('name'), tags=incident.get('tags'), incident_id=incident.get('incident_id'), created_time=incident.get('created')) incidents_readable_outputs.append(readable_output) headers = ['incident_id', 'created_time', 'name', 'description', 'state', 'tags'] human_readable = tableToMarkdown('CrowdStrike Incidents', incidents_readable_outputs, headers, removeNull=True) return human_readable def list_incident_summaries_command(): fetch_query = demisto.args().get('fetch_query') if fetch_query: fetch_query = "{query}".format(query=fetch_query) incidents_ids = get_incidents_ids(filter_arg=fetch_query) else: incidents_ids = get_incidents_ids() incidents_response_data = get_incidents_entities(incidents_ids) incidents = [resource for resource in incidents_response_data.get('resources')] incidents_human_readable = detections_to_human_readable(incidents) return CommandResults( readable_output=incidents_human_readable, outputs_prefix='CrowdStrike.Incidents', outputs_key_field='incident_id', outputs=incidents ) def test_module(): try: get_token(new_token=True) except 
ValueError: return 'Connection Error: The URL or The API key you entered is probably incorrect, please try again.' if demisto.params().get('isFetch'): try: fetch_incidents() except ValueError: return 'Error: Something is wrong with the filters you entered for the fetch incident, please try again.' return 'ok' ''' COMMANDS MANAGER / SWITCH PANEL ''' def main(): LOG('Command being called is {}'.format(demisto.command())) # should raise error in case of issue if demisto.command() == 'fetch-incidents': demisto.incidents(fetch_incidents()) try: if demisto.command() == 'test-module': result = test_module() return_results(result) get_token(new_token=True) demisto.results('ok') elif demisto.command() == 'cs-device-ran-on': return_results(get_indicator_device_id()) elif demisto.command() == 'cs-falcon-search-device': demisto.results(search_device_command()) elif demisto.command() == 'cs-falcon-get-behavior': demisto.results(get_behavior_command()) elif demisto.command() == 'cs-falcon-search-detection': demisto.results(search_detections_command()) elif demisto.command() == 'cs-falcon-resolve-detection': demisto.results(resolve_detection_command()) elif demisto.command() == 'cs-falcon-contain-host': demisto.results(contain_host_command()) elif demisto.command() == 'cs-falcon-lift-host-containment': demisto.results(lift_host_containment_command()) elif demisto.command() == 'cs-falcon-run-command': demisto.results(run_command()) elif demisto.command() == 'cs-falcon-upload-script': demisto.results(upload_script_command()) elif demisto.command() == 'cs-falcon-get-script': demisto.results(get_script_command()) elif demisto.command() == 'cs-falcon-delete-script': demisto.results(delete_script_command()) elif demisto.command() == 'cs-falcon-list-scripts': demisto.results(list_scripts_command()) elif demisto.command() == 'cs-falcon-upload-file': demisto.results(upload_file_command()) elif demisto.command() == 'cs-falcon-delete-file': demisto.results(delete_file_command()) elif 
demisto.command() == 'cs-falcon-get-file': demisto.results(get_file_command()) elif demisto.command() == 'cs-falcon-list-files': demisto.results(list_files_command()) elif demisto.command() == 'cs-falcon-run-script': demisto.results(run_script_command()) elif demisto.command() == 'cs-falcon-run-get-command': demisto.results(run_get_command()) elif demisto.command() == 'cs-falcon-status-get-command': demisto.results(status_get_command()) elif demisto.command() == 'cs-falcon-status-command': demisto.results(status_command()) elif demisto.command() == 'cs-falcon-get-extracted-file': demisto.results(get_extracted_file_command()) elif demisto.command() == 'cs-falcon-list-host-files': demisto.results(list_host_files_command()) elif demisto.command() == 'cs-falcon-refresh-session': demisto.results(refresh_session_command()) elif demisto.command() == 'cs-falcon-list-detection-summaries': return_results(list_detection_summaries_command()) elif demisto.command() == 'cs-falcon-list-incident-summaries': return_results(list_incident_summaries_command()) # Log exceptions except Exception as e: return_error(str(e)) if __name__ in ('__main__', 'builtin', 'builtins'): main()
# NOTE (review): in main(), the 'test-module' branch calls return_results(result)
# and then additionally get_token(new_token=True) + demisto.results('ok'),
# emitting two results for a single test run; also 'fetch-incidents' is handled
# before the try block, so its errors bypass the return_error handler.
# --- Module header: imports, integration parameters, auth headers and key maps ---
# NOTE (review): SERVER strips a single trailing slash from the configured URL;
# TOKEN_LIFE_TIME of 28 minutes deliberately undercuts the API's 30-minute token
# lifetime (see inline note below).
import demistomock as demisto from CommonServerPython import * from CommonServerUserPython import * ''' IMPORTS ''' import json import requests import base64 import email import hashlib from typing import List from dateutil.parser import parse from typing import Dict, Tuple, Any, Optional, Union # Disable insecure warnings requests.packages.urllib3.disable_warnings() ''' GLOBALS/PARAMS ''' CLIENT_ID = demisto.params().get('client_id') SECRET = demisto.params().get('secret') # Remove trailing slash to prevent wrong URL path to service SERVER = demisto.params()['url'][:-1] if (demisto.params()['url'] and demisto.params()['url'].endswith('/')) else \ demisto.params()['url'] # Should we use SSL USE_SSL = not demisto.params().get('insecure', False) # How many time before the first fetch to retrieve incidents FETCH_TIME = demisto.params().get('fetch_time', '3 days') BYTE_CREDS = '{name}:{password}'.format(name=CLIENT_ID, password=SECRET).encode('utf-8') # Headers to be sent in requests HEADERS = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization': 'Basic {}'.format(base64.b64encode(BYTE_CREDS).decode()) } # Note: True life time of token is actually 30 mins TOKEN_LIFE_TIME = 28 INCIDENTS_PER_FETCH = int(demisto.params().get('incidents_per_fetch', 15)) # Remove proxy if not set to true in params handle_proxy() ''' KEY DICTIONARY ''' DETECTIONS_BASE_KEY_MAP = { 'device.hostname': 'System', 'device.cid': 'CustomerID', 'hostinfo.domain': 'MachineDomain', 'detection_id': 'ID', 'created_timestamp': 'ProcessStartTime', 'max_severity': 'MaxSeverity', 'show_in_ui': 'ShowInUi', 'status': 'Status' } DETECTIONS_BEHAVIORS_KEY_MAP = { 'filename': 'FileName', 'scenario': 'Scenario', 'md5': 'MD5', 'sha256': 'SHA256', 'ioc_type': 'IOCType', 'ioc_value': 'IOCValue', 'cmdline': 'CommandLine', 'user_name': 'UserName', 'behavior_id': 'ID', } SEARCH_IOC_KEY_MAP = { 'type': 'Type', 'value': 'Value', 'policy': 'Policy', 'source': 'Source', 'share_level': 
# Maps raw IOC fields to context keys (mapping continues below).
'ShareLevel', 'expiration_timestamp': 'Expiration', 'description': 'Description', 'created_timestamp': 'CreatedTime', 'created_by': 'CreatedBy', 'modified_timestamp': 'ModifiedTime', 'modified_by': 'ModifiedBy' } SEARCH_DEVICE_KEY_MAP = { 'device_id': 'ID', 'external_ip': 'ExternalIP', 'local_ip': 'LocalIP', 'hostname': 'Hostname', 'os_version': 'OS', 'mac_address': 'MacAddress', 'first_seen': 'FirstSeen', 'last_seen': 'LastSeen' } ''' SPLIT KEY DICTIONARY ''' """ Pattern: { 'Path': 'Path to item', 'NewKey': 'Value of output key', 'Delim': 'Delimiter char', 'Index': Split Array Index } """ DETECTIONS_BEHAVIORS_SPLIT_KEY_MAP = [ { 'Path': 'parent_details.parent_process_graph_id', 'NewKey': 'SensorID', 'Delim': ':', 'Index': 1 }, { 'Path': 'parent_details.parent_process_graph_id', 'NewKey': 'ParentProcessID', 'Delim': ':', 'Index': 2 }, { 'Path': 'triggering_process_graph_id', 'NewKey': 'ProcessID', 'Delim': ':', 'Index': 2 }, ] ''' HELPER FUNCTIONS ''' def http_request(method, url_suffix, params=None, data=None, files=None, headers=HEADERS, safe=False, get_token_flag=True, no_json=False): """ A wrapper for requests lib to send our requests and handle requests and responses better. :type method: ``str`` :param method: HTTP method for the request. :type url_suffix: ``str`` :param url_suffix: The suffix of the URL (endpoint) :type params: ``dict`` :param params: The URL params to be passed. :type data: ``str`` :param data: The body data of the request. 
Review note: on a non-2xx status, a 403 with get_token_flag retries once with a
fresh token; otherwise the call ends via return_error (or returns None if safe).
:type headers: ``dict`` :param headers: Request headers :type safe: ``bool`` :param safe: If set to true will return None in case of http error :type get_token_flag: ``bool`` :param get_token_flag: If set to True will call get_token() :type no_json: ``bool`` :param no_json: If set to true will not parse the content and will return the raw response object for successful response :return: Returns the http request response json :rtype: ``dict`` """ if get_token_flag: token = get_token() headers['Authorization'] = 'Bearer {}'.format(token) url = SERVER + url_suffix try: res = requests.request( method, url, verify=USE_SSL, params=params, data=data, headers=headers, files=files ) except requests.exceptions.RequestException: return_error('Error in connection to the server. Please make sure you entered the URL correctly.') try: if res.status_code not in {200, 201, 202, 204}: res_json = res.json() reason = res.reason resources = res_json.get('resources', {}) if resources: for host_id, resource in resources.items(): errors = resource.get('errors', []) if errors: error_message = errors[0].get('message') reason += f'\nHost ID {host_id} - {error_message}' elif res_json.get('errors'): errors = res_json.get('errors', []) for error in errors: reason += f"\n{error.get('message')}" err_msg = 'Error in API call to CrowdStrike Falcon: code: {code} - reason: {reason}'.format( code=res.status_code, reason=reason ) # try to create a new token if res.status_code == 403 and get_token_flag: LOG(err_msg) token = get_token(new_token=True) headers['Authorization'] = 'Bearer {}'.format(token) return http_request(method, url_suffix, params, data, headers, safe, get_token_flag=False) elif safe: return None return_error(err_msg) return res if no_json else res.json() except ValueError as exception: raise ValueError( f'Failed to parse json object from response: {exception} - {res.content}') # type: ignore[str-bytes-safe] def create_entry_object(contents: Union[List[Any], Dict[str, Any]] = {}, ec: 
# NOTE (review): mutable default `contents={}` above is shared across calls -- safe only while never mutated.
Union[List[Any], Dict[str, Any]] = None, hr: str = ''): """ Creates an entry object :type contents: ``dict`` :param contents: Raw response to output :type ec: ``dict`` :param ec: Entry context of the entry object :type hr: ``str`` :param hr: Human readable :return: Entry object :rtype: ``dict`` """ return { 'Type': entryTypes['note'], 'Contents': contents, 'ContentsFormat': formats['json'], 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': hr, 'EntryContext': ec } def detection_to_incident(detection): """ Creates an incident of a detection. :type detection: ``dict`` :param detection: Single detection object :return: Incident representation of a detection :rtype ``dict`` """ incident = { 'name': 'Detection ID: ' + str(detection.get('detection_id')), 'occurred': str(detection.get('created_timestamp')), 'rawJSON': json.dumps(detection), 'severity': severity_string_to_int(detection.get('max_severity_displayname')) } return incident def incident_to_incident_context(incident): """ Creates an incident context of a incident. 
Review note: uses the first element of incident['hosts'] for the occurred time,
so an incident with no hosts would raise here -- confirm upstream guarantees.
:type incident: ``dict`` :param incident: Single detection object :return: Incident context representation of a incident :rtype ``dict`` """ incident_id = str(incident.get('incident_id')) incident_hosts = incident.get('hosts')[0] incident_context = { 'name': f'Incident ID: {incident_id}', 'occurred': incident_hosts.get('modified_timestamp'), 'rawJSON': json.dumps(incident) } return incident_context def severity_string_to_int(severity): """ Converts a severity string to DBot score representation :type severity: ``str`` :param severity: String representation of a severity :return: DBot score representation of the severity :rtype ``int`` """ if severity in ('Critical', 'High'): return 3 elif severity in ('Medium', 'Low'): return 2 return 0 def get_trasnformed_dict(old_dict, transformation_dict): """ Returns a dictionary with the same values as old_dict, with the correlating key:value in transformation_dict :type old_dict: ``dict`` :param old_dict: Old dictionary to pull values from :type transformation_dict: ``dict`` :param transformation_dict: Transformation dictionary that contains oldkeys:newkeys :return Transformed dictionart (according to transformation_dict values) :rtype ``dict`` """ new_dict = {} for k in list(old_dict.keys()): if k in transformation_dict: new_dict[transformation_dict[k]] = old_dict[k] return new_dict def extract_transformed_dict_with_split(old_dict, transformation_dict_arr): """ Extracts new values out of old_dict using a json structure of: {'Path': 'Path to item', 'NewKey': 'Value of output key', 'Delim': 'Delimiter char', 'Index': Split Array Index} """ new_dict = {} for trans_dict in transformation_dict_arr: try: val = demisto.get(old_dict, trans_dict['Path']) if 'split' in dir(val): i = trans_dict['Index'] new_dict[trans_dict['NewKey']] = val.split(trans_dict['Delim'])[i] except Exception as ex: LOG('Error {exception} with: {tdict}'.format(exception=ex, tdict=trans_dict)) return new_dict def get_passed_mins(start_time, end_time_str): """ 
Review note: `timedelta.seconds` below ignores the days component; spans over
24h would under-count -- confirm callers only pass recent timestamps.
Returns the time passed in mins :param start_time: Start time in datetime :param end_time_str: End time in str :return: The passed mins in int """ time_delta = start_time - datetime.fromtimestamp(end_time_str) return time_delta.seconds / 60 ''' COMMAND SPECIFIC FUNCTIONS ''' def init_rtr_single_session(host_id: str) -> str: """ Start a session with single host. :param host_id: Host agent ID to initialize a RTR session on. :return: The session ID to execute the command on """ endpoint_url = '/real-time-response/entities/sessions/v1' body = json.dumps({ 'device_id': host_id }) response = http_request('POST', endpoint_url, data=body) resources = response.get('resources') if resources and isinstance(resources, list) and isinstance(resources[0], dict): session_id = resources[0].get('session_id') if isinstance(session_id, str): return session_id raise ValueError('No session id found in the response') def init_rtr_batch_session(host_ids: list) -> str: """ Start a session with one or more hosts :param host_ids: List of host agent ID’s to initialize a RTR session on. :return: The session batch ID to execute the command on """ endpoint_url = '/real-time-response/combined/batch-init-session/v1' body = json.dumps({ 'host_ids': host_ids }) response = http_request('POST', endpoint_url, data=body) return response.get('batch_id') def refresh_session(host_id: str) -> Dict: """ Refresh a session timeout on a single host. :param host_id: Host agent ID to run RTR command on. :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/entities/refresh-session/v1' body = json.dumps({ 'device_id': host_id }) response = http_request('POST', endpoint_url, data=body) return response def run_batch_read_cmd(host_ids: list, command_type: str, full_command: str) -> Dict: """ Sends RTR command scope with read access :param host_ids: List of host agent ID’s to run RTR command on. 
Review note: each run_batch_* helper opens a fresh batch session via
init_rtr_batch_session before posting the command.
:param command_type: Read-only command type we are going to execute, for example: ls or cd. :param full_command: Full command string for the command. :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/combined/batch-command/v1' batch_id = init_rtr_batch_session(host_ids) body = json.dumps({ 'base_command': command_type, 'batch_id': batch_id, 'command_string': full_command }) response = http_request('POST', endpoint_url, data=body) return response def run_batch_write_cmd(host_ids: list, command_type: str, full_command: str) -> Dict: """ Sends RTR command scope with write access :param host_ids: List of host agent ID’s to run RTR command on. :param command_type: Read-only command type we are going to execute, for example: ls or cd. :param full_command: Full command string for the command. :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/combined/batch-active-responder-command/v1' batch_id = init_rtr_batch_session(host_ids) body = json.dumps({ 'base_command': command_type, 'batch_id': batch_id, 'command_string': full_command }) response = http_request('POST', endpoint_url, data=body) return response def run_batch_admin_cmd(host_ids: list, command_type: str, full_command: str) -> Dict: """ Sends RTR command scope with write access :param host_ids: List of host agent ID’s to run RTR command on. :param command_type: Read-only command type we are going to execute, for example: ls or cd. :param full_command: Full command string for the command. 
Review note: this variant posts to the batch-admin-command endpoint; the
'write access' / 'Read-only' wording above looks copy-pasted from the
read/write variants.
:return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/combined/batch-admin-command/v1' batch_id = init_rtr_batch_session(host_ids) body = json.dumps({ 'base_command': command_type, 'batch_id': batch_id, 'command_string': full_command }) response = http_request('POST', endpoint_url, data=body) return response def run_batch_get_cmd(host_ids: list, file_path: str, optional_hosts: list = None, timeout: int = None, timeout_duration: str = None) -> Dict: """ Batch executes `get` command across hosts to retrieve files. After this call is made `/real-time-response/combined/batch-get-command/v1` is used to query for the results. :param host_ids: List of host agent ID’s to run RTR command on. :param file_path: Full path to the file that is to be retrieved from each host in the batch. :param optional_hosts: List of a subset of hosts we want to run the command on. If this list is supplied, only these hosts will receive the command. :param timeout: Timeout for how long to wait for the request in seconds :param timeout_duration: Timeout duration for for how long to wait for the request in duration syntax :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/combined/batch-get-command/v1' batch_id = init_rtr_batch_session(host_ids) body = assign_params(batch_id=batch_id, file_path=file_path, optional_hosts=optional_hosts) params = assign_params(timeout=timeout, timeout_duration=timeout_duration) response = http_request('POST', endpoint_url, data=json.dumps(body), params=params) return response def status_get_cmd(request_id: str, timeout: int = None, timeout_duration: str = None) -> Dict: """ Retrieves the status of the specified batch get command. Will return successful files when they are finished processing. :param request_id: ID to the request of `get` command. 
Review note: polls the same batch-get-command endpoint, passing the request id
as batch_get_cmd_req_id.
:param timeout: Timeout for how long to wait for the request in seconds :param timeout_duration: Timeout duration for for how long to wait for the request in duration syntax :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/combined/batch-get-command/v1' params = assign_params(timeout=timeout, timeout_duration=timeout_duration, batch_get_cmd_req_id=request_id) response = http_request('GET', endpoint_url, params=params) return response def run_single_read_cmd(host_id: str, command_type: str, full_command: str) -> Dict: """ Sends RTR command scope with read access :param host_id: Host agent ID to run RTR command on. :param command_type: Active-Responder command type we are going to execute, for example: get or cp. :param full_command: Full command string for the command. :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/entities/command/v1' session_id = init_rtr_single_session(host_id) body = json.dumps({ 'base_command': command_type, 'command_string': full_command, 'session_id': session_id }) response = http_request('POST', endpoint_url, data=body) return response def run_single_write_cmd(host_id: str, command_type: str, full_command: str) -> Dict: """ Sends RTR command scope with write access :param host_id: Host agent ID to run RTR command on. :param command_type: Active-Responder command type we are going to execute, for example: get or cp. :param full_command: Full command string for the command. 
:return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/entities/active-responder-command/v1' session_id = init_rtr_single_session(host_id) body = json.dumps({ 'base_command': command_type, 'command_string': full_command, 'session_id': session_id }) response = http_request('POST', endpoint_url, data=body) return response def run_single_admin_cmd(host_id: str, command_type: str, full_command: str) -> Dict: """ Sends RTR command scope with admin access :param host_id: Host agent ID to run RTR command on. :param command_type: Active-Responder command type we are going to execute, for example: get or cp. :param full_command: Full command string for the command. :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/entities/admin-command/v1' session_id = init_rtr_single_session(host_id) body = json.dumps({ 'base_command': command_type, 'command_string': full_command, 'session_id': session_id }) response = http_request('POST', endpoint_url, data=body) return response def status_read_cmd(request_id: str, sequence_id: Optional[int]) -> Dict: """ Get status of an executed command with read access on a single host. :param request_id: Cloud Request ID of the executed command to query :param sequence_id: Sequence ID that we want to retrieve. Command responses are chunked across sequences """ endpoint_url = '/real-time-response/entities/command/v1' params = { 'cloud_request_id': request_id, 'sequence_id': sequence_id or 0 } response = http_request('GET', endpoint_url, params=params) return response def status_write_cmd(request_id: str, sequence_id: Optional[int]) -> Dict: """ Get status of an executed command with write access on a single host. :param request_id: Cloud Request ID of the executed command to query :param sequence_id: Sequence ID that we want to retrieve. 
Command responses are chunked across sequences """ endpoint_url = '/real-time-response/entities/active-responder-command/v1' params = { 'cloud_request_id': request_id, 'sequence_id': sequence_id or 0 } response = http_request('GET', endpoint_url, params=params) return response def status_admin_cmd(request_id: str, sequence_id: Optional[int]) -> Dict: """ Get status of an executed command with admin access on a single host. :param request_id: Cloud Request ID of the executed command to query :param sequence_id: Sequence ID that we want to retrieve. Command responses are chunked across sequences """ endpoint_url = '/real-time-response/entities/admin-command/v1' params = { 'cloud_request_id': request_id, 'sequence_id': sequence_id or 0 } response = http_request('GET', endpoint_url, params=params) return response def list_host_files(host_id: str) -> Dict: """ Get a list of files for the specified RTR session on a host. :param host_id: Host agent ID to run RTR command on. :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/entities/file/v1' session_id = init_rtr_single_session(host_id) params = { 'session_id': session_id } response = http_request('GET', endpoint_url, params=params) return response def upload_script(name: str, permission_type: str, content: str, entry_id: str) -> Dict: """ Uploads a script by either given content or file :param name: Script name to upload :param permission_type: Permissions type of script to upload :param content: PowerShell script content :param entry_id: Script file to upload :return: Response JSON which contains errors (if exist) and how many resources were affected """ endpoint_url = '/real-time-response/entities/scripts/v1' body: Dict[str, Tuple[Any, Any]] = { 'name': (None, name), 'permission_type': (None, permission_type) } temp_file = None try: if content: body['content'] = (None, content) else: # entry_id was provided file_ = demisto.getFilePath(entry_id) 
# --- Script and put-file management helpers (upload/get/delete/list) + token handling ---
# NOTE (review): upload_script/upload_file send multipart bodies through the
# `files=` argument and close the opened temp file in `finally`.
file_name = file_.get('name') # pylint: disable=E1101 temp_file = open(file_.get('path'), 'rb') # pylint: disable=E1101 body['file'] = (file_name, temp_file) headers = { 'Authorization': HEADERS['Authorization'], 'Accept': 'application/json' } response = http_request('POST', endpoint_url, files=body, headers=headers) return response finally: if temp_file: temp_file.close() def get_script(script_id: list) -> Dict: """ Retrieves a script given its ID :param script_id: ID of script to get :return: Response JSON which contains errors (if exist) and retrieved resource """ endpoint_url = '/real-time-response/entities/scripts/v1' params = { 'ids': script_id } response = http_request('GET', endpoint_url, params=params) return response def delete_script(script_id: str) -> Dict: """ Deletes a script given its ID :param script_id: ID of script to delete :return: Response JSON which contains errors (if exist) and how many resources were affected """ endpoint_url = '/real-time-response/entities/scripts/v1' params = { 'ids': script_id } response = http_request('DELETE', endpoint_url, params=params) return response def list_scripts() -> Dict: """ Retrieves list of scripts :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/entities/scripts/v1' response = http_request('GET', endpoint_url) return response def get_extracted_file(host_id: str, sha256: str, filename: str = None): """ Get RTR extracted file contents for specified session and sha256. :param host_id: The host agent ID to initialize the RTR session on. :param sha256: Extracted SHA256 :param filename: Filename to use for the archive name and the file within the archive. 
Review note: returns the raw response object (no_json=True below) because the
body is an archive payload, not JSON.
""" endpoint_url = '/real-time-response/entities/extracted-file-contents/v1' session_id = init_rtr_single_session(host_id) params = { 'session_id': session_id, 'sha256': sha256 } if filename: params['filename'] = filename response = http_request('GET', endpoint_url, params=params, no_json=True) return response def upload_file(entry_id: str, description: str) -> Tuple: """ Uploads a file given entry ID :param entry_id: The entry ID of the file to upload :param description: String description of file to upload :return: Response JSON which contains errors (if exist) and how many resources were affected and the file name """ endpoint_url = '/real-time-response/entities/put-files/v1' temp_file = None try: file_ = demisto.getFilePath(entry_id) file_name = file_.get('name') # pylint: disable=E1101 temp_file = open(file_.get('path'), 'rb') # pylint: disable=E1101 body = { 'name': (None, file_name), 'description': (None, description), 'file': (file_name, temp_file) } headers = { 'Authorization': HEADERS['Authorization'], 'Accept': 'application/json' } response = http_request('POST', endpoint_url, files=body, headers=headers) return response, file_name finally: if temp_file: temp_file.close() def delete_file(file_id: str) -> Dict: """ Delete a put-file based on the ID given :param file_id: ID of file to delete :return: Response JSON which contains errors (if exist) and how many resources were affected """ endpoint_url = '/real-time-response/entities/put-files/v1' params = { 'ids': file_id } response = http_request('DELETE', endpoint_url, params=params) return response def get_file(file_id: list) -> Dict: """ Get put-files based on the ID's given :param file_id: ID of file to get :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/entities/put-files/v1' params = { 'ids': file_id } response = http_request('GET', endpoint_url, params=params) return response def list_files() -> Dict: """ Get a list of put-file 
ID's that are available to the user for the put command. :return: Response JSON which contains errors (if exist) and retrieved resources """ endpoint_url = '/real-time-response/entities/put-files/v1' response = http_request('GET', endpoint_url) return response def get_token(new_token=False): """ Retrieves the token from the server if it's expired and updates the global HEADERS to include it :param new_token: If set to True will generate a new token regardless of time passed :rtype: ``str`` :return: Token """ now = datetime.now() ctx = demisto.getIntegrationContext() if ctx and not new_token: passed_mins = get_passed_mins(now, ctx.get('time')) if passed_mins >= TOKEN_LIFE_TIME: # token expired auth_token = get_token_request() demisto.setIntegrationContext({'auth_token': auth_token, 'time': date_to_timestamp(now) / 1000}) else: # token hasn't expired auth_token = ctx.get('auth_token') else: # there is no token auth_token = get_token_request() demisto.setIntegrationContext({'auth_token': auth_token, 'time': date_to_timestamp(now) / 1000}) return auth_token def get_token_request(): """ Sends token request :rtype ``str`` :return: Access token """ body = { 'client_id': CLIENT_ID, 'client_secret': SECRET } headers = { 'Authorization': HEADERS['Authorization'] } token_res = http_request('POST', '/oauth2/token', data=body, headers=headers, safe=True, get_token_flag=False) if not token_res: err_msg = 'Authorization Error: User has no authorization to create a token. Please make sure you entered the' \ ' credentials correctly.' raise Exception(err_msg) return token_res.get('access_token') def get_detections(last_behavior_time=None, behavior_id=None, filter_arg=None): """ Sends detections request. The function will ignore the arguments passed according to priority: filter_arg > behavior_id > last_behavior_time :param last_behavior_time: 3rd priority. The last behavior time of results will be greater than this value :param behavior_id: 2nd priority. 
    The result will only contain the detections with matching behavior id
    :param filter_arg: 1st priority. The result will be filtered using this argument.
    :return: Response json of the get detection endpoint (IDs of the detections)
    """
    endpoint_url = '/detects/queries/detects/v1'
    params = {
        'sort': 'first_behavior.asc'
    }
    # Only one filter is applied, in priority order: filter_arg > behavior_id > last_behavior_time.
    if filter_arg:
        params['filter'] = filter_arg
    elif behavior_id:
        params['filter'] = "behaviors.behavior_id:'{0}'".format(behavior_id)
    elif last_behavior_time:
        params['filter'] = "first_behavior:>'{0}'".format(last_behavior_time)
    response = http_request('GET', endpoint_url, params)
    return response


def get_fetch_detections(last_created_timestamp=None, filter_arg=None, offset: int = 0):
    """ Sends detection request, based on the created_timestamp field. Used for fetch-incidents
    Args:
        last_created_timestamp: last created timestamp of the results will be greater than this value.
        filter_arg: The result will be filtered using this argument (takes priority over last_created_timestamp).
        offset: pagination offset into the result set, used to resume a partial fetch.
    Returns:
        Response json of the get detection endpoint (IDs of the detections)
    """
    endpoint_url = '/detects/queries/detects/v1'
    params = {
        'sort': 'first_behavior.asc',
        'offset': offset,
        'limit': INCIDENTS_PER_FETCH
    }
    if filter_arg:
        params['filter'] = filter_arg
    elif last_created_timestamp:
        params['filter'] = "created_timestamp:>'{0}'".format(last_created_timestamp)
    response = http_request('GET', endpoint_url, params)
    return response


def get_detections_entities(detections_ids):
    """
    Sends detection entities request
    :param detections_ids: IDs of the requested detections.
    :return: Response json of the get detection entities endpoint (detection objects)
    """
    ids_json = {'ids': detections_ids}
    if detections_ids:
        response = http_request(
            'POST',
            '/detects/entities/summaries/GET/v1',
            data=json.dumps(ids_json)
        )
        return response
    # Empty/None input: echo it back rather than calling the API with no IDs.
    return detections_ids


def get_incidents_ids(last_created_timestamp=None, filter_arg=None, offset: int = 0):
    """
    Queries incident IDs, sorted by modified_timestamp. Used for fetch-incidents.
    :param last_created_timestamp: only incidents modified after this value are returned.
    :param filter_arg: explicit FQL filter; takes priority over last_created_timestamp.
    :param offset: pagination offset into the result set.
    :return: Response json containing the matching incident IDs.
    """
    get_incidents_endpoint = '/incidents/queries/incidents/v1'
    params = {
        'sort': 'modified_timestamp.asc',
        'offset': offset,
        'limit': INCIDENTS_PER_FETCH
    }
    if filter_arg:
        params['filter'] = filter_arg
    elif last_created_timestamp:
        params['filter'] = "modified_timestamp:>'{0}'".format(last_created_timestamp)
    response = http_request('GET', get_incidents_endpoint, params)
    return response


def get_incidents_entities(incidents_ids):
    """
    Resolves incident IDs into full incident objects.
    :param incidents_ids: IDs of the requested incidents.
    :return: Response json containing the incident resources.
    """
    ids_json = {'ids': incidents_ids}
    response = http_request(
        'POST',
        '/incidents/entities/incidents/GET/v1',
        data=json.dumps(ids_json)
    )
    return response


def create_ioc():
    """
    UNTESTED - Creates an IoC
    :return: Response json of create IoC request
    """
    args = demisto.args()
    input_args = {}
    # req args:
    input_args['type'] = args['ioc_type']
    input_args['value'] = args['ioc_value']
    input_args['policy'] = args['policy']
    # opt args:
    input_args['expiration_days'] = args.get('expiration_days')
    input_args['source'] = args.get('source')
    input_args['description'] = args.get('description')
    # Drop unset optional args; the API expects string values.
    payload = {k: str(v) for k, v in input_args.items() if v}
    headers = {'Authorization': HEADERS['Authorization']}
    return http_request('POST', '/indicators/entities/iocs/v1', params=payload, headers=headers)


def search_iocs():
    """
    UNTESTED IN OAUTH 2- Searches an IoC
    :return: IoCs that were found in the search
    """
    args = demisto.args()
    ids = args.get('ids')
    if not ids:
        search_args = {
            'types': str(args.get('ioc_types', '')).split(','),
            'values': str(args.get('ioc_values', '')).split(','),
            'policies': str(args.get('policy', '')),
            'sources': str(args.get('sources', '')).split(','),
            'from.expiration_timestamp':
                str(args.get('expiration_from', '')),
            'to.expiration_timestamp': str(args.get('expiration_to', '')),
            'limit': str(args.get('limit', 50))
        }
        payload = {}
        # Keep only non-empty filters; list-valued args are considered empty
        # when their first element is '' (result of splitting an empty string).
        for k, arg in search_args.items():
            if type(arg) is list:
                if arg[0]:
                    payload[k] = arg
            elif arg:
                payload[k] = arg
        # First query for matching IDs, then fetch the full IoC entities below.
        ids = http_request('GET', '/indicators/queries/iocs/v1', payload).get('resources')
        if not ids:
            return None
    else:
        ids = str(ids)
    payload = {
        'ids': ids
    }
    return http_request('GET', '/indicators/entities/iocs/v1', params=payload)


def enrich_ioc_dict_with_ids(ioc_dict):
    """
    Enriches the provided ioc_dict with IoC ID
    :param ioc_dict: IoC dict transformed using the SEARCH_IOC_KEY_MAP
    :return: ioc_dict with its ID key:value updated (mutated in place as well)
    """
    for ioc in ioc_dict:
        # The API identifies custom IoCs by the "type:value" pair.
        ioc['ID'] = '{type}:{val}'.format(type=ioc.get('Type'), val=ioc.get('Value'))
    return ioc_dict


def delete_ioc():
    """
    UNTESTED - Sends a delete IoC request
    :return: Response json of delete IoC
    """
    ids = str(demisto.args().get('ids'))
    payload = {
        'ids': ids
    }
    return http_request('DELETE', '/indicators/entities/iocs/v1', payload)


def update_iocs():
    """
    UNTESTED - Updates the values one or more IoC
    :return: Response json of update IoC request
    """
    args = demisto.args()
    input_args = {
        'ids': args.get('ids'),
        'policy': args.get('policy', ''),
        'expiration_days': args.get('expiration_days', ''),
        'source': args.get('source'),
        'description': args.get('description')
    }
    # Drop unset args; the API expects string values.
    payload = {k: str(v) for k, v in input_args.items() if v}
    headers = {'Authorization': HEADERS['Authorization']}
    return http_request('PATCH', '/indicators/entities/iocs/v1', params=payload, headers=headers)


def search_device():
    """
    Searches for devices using the argument provided by the command execution.
    Returns empty result if no device was found
    :return: Search device response json
    """
    args = demisto.args()
    input_arg_dict = {
        'device_id': str(args.get('ids', '')).split(','),
        'status': str(args.get('status', '')).split(','),
        'hostname': str(args.get('hostname', '')).split(','),
        'platform_name': str(args.get('platform_name', '')).split(','),
        'site_name': str(args.get('site_name', '')).split(',')
    }
    # Start from any raw FQL filter the user supplied, then append per-arg clauses.
    url_filter = '{}'.format(str(args.get('filter', '')))
    for k, arg in input_arg_dict.items():
        if arg:
            if type(arg) is list:
                arg_filter = ''
                for arg_elem in arg:
                    if arg_elem:
                        # Comma-join multiple values of the same field (FQL OR semantics).
                        first_arg = '{filter},{inp_arg}'.format(filter=arg_filter, inp_arg=k) if arg_filter else k
                        arg_filter = "{first}:'{second}'".format(first=first_arg, second=arg_elem)
                if arg_filter:
                    # '+' joins distinct fields (FQL AND semantics).
                    url_filter = "{url_filter}{arg_filter}".format(url_filter=url_filter + '+' if url_filter else '',
                                                                   arg_filter=arg_filter)
            else:
                # All args should be a list. this is a fallback
                url_filter = "{url_filter}+{inp_arg}:'{arg_val}'".format(url_filter=url_filter, inp_arg=k, arg_val=arg)
    raw_res = http_request('GET', '/devices/queries/devices/v1', params={'filter': url_filter})
    device_ids = raw_res.get('resources')
    if not device_ids:
        return None
    return http_request('GET', '/devices/entities/devices/v1', params={'ids': device_ids})


def behavior_to_entry_context(behavior):
    """
    Transforms a behavior to entry context representation
    :param behavior: Behavior dict in the format of crowdstrike's API response
    :return: Behavior in entry context representation
    """
    raw_entry = get_trasnformed_dict(behavior, DETECTIONS_BEHAVIORS_KEY_MAP)
    raw_entry.update(extract_transformed_dict_with_split(behavior, DETECTIONS_BEHAVIORS_SPLIT_KEY_MAP))
    return raw_entry


def get_username_uuid(username: str):
    """
    Obtain CrowdStrike user’s UUId by email.
    :param username: Username to get UUID of.
    :return: The user UUID
    :raises ValueError: if no user matches the given username.
    """
    response = http_request('GET', '/users/queries/user-uuids-by-email/v1', params={'uid': username})
    resources: list = response.get('resources', [])
    if not resources:
        raise ValueError(f'User {username} was not found')
    return resources[0]


def resolve_detection(ids, status, assigned_to_uuid, show_in_ui, comment):
    """
    Sends a resolve detection request
    :param ids: Single or multiple ids in an array string format
    :param status: New status of the detection
    :param assigned_to_uuid: uuid to assign the detection to
    :param show_in_ui: Boolean flag in string format (true/false)
    :param comment: Optional comment to add to the detection
    :return: Resolve detection response json
    """
    payload = {
        'ids': ids
    }
    if status:
        payload['status'] = status
    if assigned_to_uuid:
        payload['assigned_to_uuid'] = assigned_to_uuid
    if show_in_ui:
        payload['show_in_ui'] = show_in_ui
    if comment:
        payload['comment'] = comment
    # We do this so show_in_ui value won't contain "" — the API expects a JSON
    # boolean, but the argument arrives as the string "true"/"false".
    data = json.dumps(payload).replace('"show_in_ui": "false"', '"show_in_ui": false').replace('"show_in_ui": "true"',
                                                                                               '"show_in_ui": true')
    return http_request('PATCH', '/detects/entities/detects/v2', data=data)


def contain_host(ids):
    """
    Contains host(s) with matching ids
    :param ids: IDs of host to contain
    :return: Contain host response json
    """
    payload = {
        'ids': ids
    }
    data = json.dumps(payload)
    params = {
        'action_name': 'contain'
    }
    return http_request('POST', '/devices/entities/devices-actions/v2', data=data, params=params)


def lift_host_containment(ids):
    """
    Lifts off containment from host(s) with matching ids
    :param ids: IDs of host to lift off containment from
    :return: Lift off containment response json
    """
    payload = {
        'ids': ids
    }
    data = json.dumps(payload)
    params = {
        'action_name': 'lift_containment'
    }
    return http_request('POST', '/devices/entities/devices-actions/v2', data=data, params=params)


def timestamp_length_equalization(timestamp1, timestamp2):
    """
    Makes sure the timestamps are of the same length.
    Args:
        timestamp1: First timestamp to compare.
        timestamp2: Second timestamp to compare.
    Returns:
        the two timestamps in the same length (the longer one)
    """
    diff_len = len(str(timestamp1)) - len(str(timestamp2))
    # no difference in length
    if diff_len == 0:
        return int(timestamp1), int(timestamp2)
    # length of timestamp1 > timestamp2: pad the shorter one by powers of ten
    if diff_len > 0:
        ten_times = pow(10, diff_len)
        timestamp2 = int(timestamp2) * ten_times
    # length of timestamp2 > timestamp1
    else:
        ten_times = pow(10, diff_len * -1)
        timestamp1 = int(timestamp1) * ten_times
    return int(timestamp1), int(timestamp2)


''' COMMANDS FUNCTIONS '''


def get_fetch_times_and_offset(incident_type):
    """
    Reads the last-run state for the given incident type ('detection' or 'incident')
    and returns (last_fetch_time, offset, prev_fetch, last_fetch_timestamp_ms).
    Falls back to the configured FETCH_TIME window when there is no previous run.
    """
    last_run = demisto.getLastRun()
    last_fetch_time = last_run.get(f'first_behavior_{incident_type}_time')
    offset = last_run.get(f'{incident_type}_offset', 0)
    if not last_fetch_time:
        last_fetch_time, _ = parse_date_range(FETCH_TIME, date_format='%Y-%m-%dT%H:%M:%SZ')
    prev_fetch = last_fetch_time
    last_fetch_timestamp = int(parse(last_fetch_time).timestamp() * 1000)
    return last_fetch_time, offset, prev_fetch, last_fetch_timestamp


def fetch_incidents():
    """
    Fetches incident using the detections API
    :return: Fetched detections in incident format
    """
    incidents = []  # type:List
    fetch_incidents_or_detections = demisto.params().get('fetch_incidents_or_detections')

    if 'Detections' in fetch_incidents_or_detections:
        incident_type = 'detection'
        last_fetch_time, offset, prev_fetch, last_fetch_timestamp = get_fetch_times_and_offset(incident_type)

        fetch_query = demisto.params().get('fetch_query')
        if fetch_query:
            fetch_query = "created_timestamp:>'{time}'+{query}".format(time=last_fetch_time, query=fetch_query)
            detections_ids = demisto.get(get_fetch_detections(filter_arg=fetch_query, offset=offset), 'resources')
        else:
            detections_ids = demisto.get(get_fetch_detections(last_created_timestamp=last_fetch_time, offset=offset),
                                         'resources')
        if detections_ids:
            raw_res = get_detections_entities(detections_ids)
            if "resources" in raw_res:
                raw_res['type'] = "detections"
                for detection in demisto.get(raw_res, "resources"):
                    incident = detection_to_incident(detection)
                    incident_date = incident['occurred']
                    incident_date_timestamp = int(parse(incident_date).timestamp() * 1000)

                    # make sure that the two timestamps are in the same length
                    if len(str(incident_date_timestamp)) != len(str(last_fetch_timestamp)):
                        incident_date_timestamp, last_fetch_timestamp = timestamp_length_equalization(
                            incident_date_timestamp, last_fetch_timestamp)

                    # Update last run and add incident if the incident is newer than last fetch
                    if incident_date_timestamp > last_fetch_timestamp:
                        last_fetch_time = incident_date
                        last_fetch_timestamp = incident_date_timestamp

                    incidents.append(incident)

        # A full page means there may be more results: keep the previous fetch
        # time and advance the offset so the next run resumes the same window.
        if len(incidents) == INCIDENTS_PER_FETCH:
            demisto.setLastRun({'first_behavior_detection_time': prev_fetch,
                                'detection_offset': offset + INCIDENTS_PER_FETCH})
        else:
            demisto.setLastRun({'first_behavior_detection_time': last_fetch_time})

    if 'Incidents' in fetch_incidents_or_detections:
        incident_type = 'incident'
        last_fetch_time, offset, prev_fetch, last_fetch_timestamp = get_fetch_times_and_offset(incident_type)

        fetch_query = demisto.params().get('fetch_query')
        if fetch_query:
            fetch_query = "modified_timestamp:>'{time}'+{query}".format(time=last_fetch_time, query=fetch_query)
            incidents_ids = demisto.get(get_incidents_ids(filter_arg=fetch_query, offset=offset), 'resources')
        else:
            incidents_ids = demisto.get(get_incidents_ids(last_created_timestamp=last_fetch_time, offset=offset),
                                        'resources')
        if incidents_ids:
            raw_res = get_incidents_entities(incidents_ids)
            if "resources" in raw_res:
                raw_res['type'] = "incidents"
                for incident in demisto.get(raw_res, "resources"):
                    incident_to_context = incident_to_incident_context(incident)
                    incident_date = incident_to_context['occurred']
                    incident_date_timestamp = int(parse(incident_date).timestamp() * 1000)

                    # make sure that the two timestamps are in the same length
                    if len(str(incident_date_timestamp)) != len(str(last_fetch_timestamp)):
                        incident_date_timestamp, last_fetch_timestamp = timestamp_length_equalization(
                            incident_date_timestamp, last_fetch_timestamp)

                    # Update last run and add incident if the incident is newer than last fetch
                    if incident_date_timestamp > last_fetch_timestamp:
                        last_fetch_time = incident_date
                        last_fetch_timestamp = incident_date_timestamp

                    incidents.append(incident_to_context)

        # Same pagination scheme as detections above.
        if len(incidents) == INCIDENTS_PER_FETCH:
            demisto.setLastRun({'first_behavior_incident_time': prev_fetch,
                                'incident_offset': offset + INCIDENTS_PER_FETCH})
        else:
            demisto.setLastRun({'first_behavior_incident_time': last_fetch_time})

    return incidents


def create_ioc_command():
    """
    UNTESTED - Creates an IoC
    :return: EntryObject of create IoC command
    """
    raw_res = create_ioc()
    return create_entry_object(contents=raw_res, hr="Custom IoC was created successfully.")


def search_iocs_command():
    """
    UNTESTED IN OAUTH 2 - Searches for an ioc
    :return: EntryObject of search IoC command
    """
    raw_res = search_iocs()
    if not raw_res:
        return create_entry_object(hr='Could not find any Indicators of Compromise.')
    iocs = raw_res.get('resources')
    ec = [get_trasnformed_dict(ioc, SEARCH_IOC_KEY_MAP) for ioc in iocs]
    enrich_ioc_dict_with_ids(ec)
    return create_entry_object(contents=raw_res, ec={'CrowdStrike.IoC(val.ID === obj.ID)': ec},
                               hr=tableToMarkdown('Indicators of Compromise', ec))


def delete_iocs_command():
    """
    UNTESTED - Deletes an IoC
    :return: EntryObject of delete IoC command
    """
    raw_res = delete_ioc()
    ids = demisto.args().get('ids')
    return create_entry_object(contents=raw_res, hr="Custom IoC {0} successfully deleted.".format(ids))


def update_iocs_command():
    """
    UNTESTED - Updates an IoC
    :return: EntryObject of update IoC command
    """
    raw_res = update_iocs()
    ids = demisto.args().get('ids')
    return create_entry_object(contents=raw_res, hr="Custom IoC {0} successfully updated.".format(ids))


def search_device_command():
    """
    Searches for a device
    :return: EntryObject of search device command
    """
    raw_res = search_device()
    if
       not raw_res:
        return create_entry_object(hr='Could not find any devices.')
    devices = raw_res.get('resources')
    entries = [get_trasnformed_dict(device, SEARCH_DEVICE_KEY_MAP) for device in devices]
    headers = ['ID', 'Hostname', 'OS', 'MacAddress', 'LocalIP', 'ExternalIP', 'FirstSeen', 'LastSeen']
    hr = tableToMarkdown('Devices', entries, headers=headers, headerTransform=pascalToSpace)
    ec = {'CrowdStrike.Device(val.ID === obj.ID)': entries}
    return create_entry_object(contents=raw_res, ec=ec, hr=hr)


def get_behavior_command():
    """
    Gets a behavior by ID
    :return: EntryObject of get behavior command
    """
    behavior_id = demisto.args().get('behavior_id')
    detections_ids = demisto.get(get_detections(behavior_id=behavior_id), 'resources')
    raw_res = get_detections_entities(detections_ids)
    entries = []
    if "resources" in raw_res:
        for resource in demisto.get(raw_res, "resources"):
            for behavior in demisto.get(resource, 'behaviors'):
                entries.append(behavior_to_entry_context(behavior))
    hr = tableToMarkdown('Behavior ID: {}'.format(behavior_id), entries, headerTransform=pascalToSpace)
    # no dt since behavior vary by more than their ID
    ec = {'CrowdStrike.Behavior': entries}
    return create_entry_object(contents=raw_res, ec=ec, hr=hr)


def search_detections_command():
    """
    Searches for a detection
    :return: EntryObject of search detections command
    """
    d_args = demisto.args()
    detections_ids = argToList(d_args.get('ids'))
    if not detections_ids:
        # No explicit IDs: fall back to an FQL filter; one of the two is required.
        filter_arg = d_args.get('filter')
        if not filter_arg:
            return_error('Command Error: Please provide at least one argument.')
        detections_ids = get_detections(filter_arg=filter_arg).get('resources')
    raw_res = get_detections_entities(detections_ids)
    entries = []
    headers = ['ID', 'Status', 'System', 'ProcessStartTime', 'CustomerID', 'MaxSeverity']
    if "resources" in raw_res:
        for detection in demisto.get(raw_res, "resources"):
            detection_entry = {}
            for path, new_key in DETECTIONS_BASE_KEY_MAP.items():
                detection_entry[new_key] = demisto.get(detection, path)
            behaviors =
                []
            for behavior in demisto.get(detection, 'behaviors'):
                behaviors.append(behavior_to_entry_context(behavior))
            detection_entry['Behavior'] = behaviors
            entries.append(detection_entry)
    hr = tableToMarkdown('Detections Found:', entries, headers=headers, removeNull=True, headerTransform=pascalToSpace)
    ec = {'CrowdStrike.Detection(val.ID === obj.ID)': entries}
    return create_entry_object(contents=raw_res, ec=ec, hr=hr)


def resolve_detection_command():
    """
    Resolves single or multiple detections
    :return: EntryObject of resolve detection command
    """
    args = demisto.args()
    ids = argToList(args.get('ids'))
    username = args.get('username')
    assigned_to_uuid = args.get('assigned_to_uuid')
    comment = args.get('comment')
    if username and assigned_to_uuid:
        raise ValueError('Only one of the arguments assigned_to_uuid or username should be provided, not both.')
    if username:
        # Translate the human-friendly username into the UUID the API requires.
        assigned_to_uuid = get_username_uuid(username)
    status = args.get('status')
    show_in_ui = args.get('show_in_ui')
    raw_res = resolve_detection(ids, status, assigned_to_uuid, show_in_ui, comment)
    args.pop('ids')
    hr = "Detection {0} updated\n".format(str(ids)[1:-1])
    hr += 'With the following values:\n'
    for k, arg in args.items():
        hr += '\t{name}:{val}\n'.format(name=k, val=arg)
    return create_entry_object(contents=raw_res, hr=hr)


def contain_host_command():
    """
    Contains hosts with user arg ids
    :return: EntryObject of contain host command
    """
    ids = argToList(demisto.args().get('ids'))
    raw_res = contain_host(ids)
    hr = "Host {} contained".format(str(ids)[1:-1])
    return create_entry_object(contents=raw_res, hr=hr)


def lift_host_containment_command():
    """
    Lifts containment off a host
    :return: EntryObject of lift host containment
    """
    ids = argToList(demisto.args().get('ids'))
    raw_res = lift_host_containment(ids)
    hr = "Containment has been lift off host {}".format(str(ids)[1:-1])
    return create_entry_object(contents=raw_res, hr=hr)


def run_command():
    """
    Runs an RTR command on one or more hosts, in batch or single-host mode,
    at the requested permission scope (read/write/admin).
    """
    args = demisto.args()
    host_ids = argToList(args.get('host_ids'))
    command_type =
        args.get('command_type')
    full_command = args.get('full_command')
    scope = args.get('scope', 'read')
    target = args.get('target', 'batch')

    output = []

    if target == 'batch':
        # Batch mode: one API call covering all hosts at the requested scope.
        if scope == 'read':
            response = run_batch_read_cmd(host_ids, command_type, full_command)
        elif scope == 'write':
            response = run_batch_write_cmd(host_ids, command_type, full_command)
        else:  # scope = admin
            response = run_batch_admin_cmd(host_ids, command_type, full_command)

        resources: dict = response.get('combined', {}).get('resources', {})

        for _, resource in resources.items():
            errors = resource.get('errors', [])
            if errors:
                error_message = errors[0].get('message', '')
                if not error_message:
                    error_message = f'Could not run command\n{errors}'
                return_error(error_message)
            output.append({
                'HostID': resource.get('aid'),
                'SessionID': resource.get('session_id'),
                'Stdout': resource.get('stdout'),
                'Stderr': resource.get('stderr'),
                'BaseCommand': resource.get('base_command'),
                'Command': full_command
            })

        human_readable = tableToMarkdown(f'Command {full_command} results', output, removeNull=True)
        entry_context_batch = {
            'CrowdStrike': {
                'Command': output
            }
        }
        return create_entry_object(contents=response, ec=entry_context_batch, hr=human_readable)
    else:  # target = 'single'
        responses = []
        # Single mode: one API call per host; results are async (TaskID to poll).
        for host_id in host_ids:
            if scope == 'read':
                response1 = run_single_read_cmd(host_id, command_type, full_command)
            elif scope == 'write':
                response1 = run_single_write_cmd(host_id, command_type, full_command)
            else:  # scope = admin
                response1 = run_single_admin_cmd(host_id, command_type, full_command)
            responses.append(response1)

            for resource in response1.get('resources', []):
                errors = resource.get('errors', [])
                if errors:
                    error_message = errors[0].get('message', '')
                    if not error_message:
                        error_message = f'Could not run command\n{errors}'
                    return_error(error_message)
                output.append({
                    'HostID': host_id,
                    'TaskID': resource.get('cloud_request_id'),
                    'SessionID': resource.get('session_id'),
                    'BaseCommand': command_type,
                    'Command': full_command,
                    'Complete': False,
                    'NextSequenceID': 0
                })

        human_readable = tableToMarkdown(f'Command {full_command} results', output, removeNull=True)
        entry_context_single = {
            'CrowdStrike.Command(val.TaskID === obj.TaskID)': output
        }
        return create_entry_object(contents=responses, ec=entry_context_single, hr=human_readable)


def upload_script_command():
    """
    Uploads an RTR script from either inline content or a war-room entry ID
    (exactly one of the two must be provided).
    """
    args = demisto.args()
    name = args.get('name')
    permission_type = args.get('permission_type', 'private')
    content = args.get('content')
    entry_id = args.get('entry_id')
    if content and entry_id:
        raise ValueError('Only one of the arguments entry_id or content should be provided, not both.')
    elif not content and not entry_id:
        raise ValueError('One of the arguments entry_id or content must be provided, none given.')
    response = upload_script(name, permission_type, content, entry_id)
    return create_entry_object(contents=response, hr='The script was uploaded successfully')


def get_script_command():
    """
    Retrieves an RTR script's metadata and, when present, its content as a file result.
    """
    script_id = argToList(demisto.args().get('script_id'))
    response = get_script(script_id)
    resources: list = response.get('resources', [])
    if resources and isinstance(resources, list):
        resource = resources[0]
        script = {
            'ID': resource.get('id'),
            'CreatedBy': resource.get('created_by'),
            'CreatedTime': resource.get('created_timestamp'),
            'Description': resource.get('description'),
            'ModifiedBy': resource.get('modified_by'),
            'ModifiedTime': resource.get('modified_timestamp'),
            'Name': resource.get('name'),
            'Permission': resource.get('permission_type'),
            'SHA256': resource.get('sha256'),
            'RunAttemptCount': resource.get('run_attempt_count'),
            'RunSuccessCount': resource.get('run_success_count'),
            'WriteAccess': resource.get('write_access')
        }
        human_readable = tableToMarkdown(f'CrowdStrike Falcon script {script_id}', script)
        entry_context = {
            'CrowdStrike.Script(val.ID === obj.ID)': script
        }
        script_content = resource.get('content')
        if script_content:
            # Also surface the script body itself as a downloadable .ps1 file.
            demisto.results(
                fileResult(
                    f"{resource.get('name', 'script')}.ps1",
                    script_content
                )
            )
        return
            create_entry_object(contents=response, ec=entry_context, hr=human_readable)
    else:
        return 'No script found.'


def delete_script_command():
    """Deletes an RTR script by its ID."""
    script_id = demisto.args().get('script_id')
    response = delete_script(script_id)
    return create_entry_object(contents=response, hr=f'Script {script_id} was deleted successfully')


def list_scripts_command():
    """Lists the RTR scripts available to the user."""
    response = list_scripts()
    resources: list = response.get('resources', [])
    scripts = []
    for resource in resources:
        scripts.append({
            'ID': resource.get('id'),
            'CreatedBy': resource.get('created_by'),
            'CreatedTime': resource.get('created_timestamp'),
            'Description': resource.get('description'),
            'ModifiedBy': resource.get('modified_by'),
            'ModifiedTime': resource.get('modified_timestamp'),
            'Name': resource.get('name'),
            'Permission': resource.get('permission_type'),
            'SHA256': resource.get('sha256'),
            'RunAttemptCount': resource.get('run_attempt_count'),
            'RunSuccessCount': resource.get('run_success_count'),
            'Platform': resource.get('platform'),
            'WriteAccess': resource.get('write_access')
        })
    human_readable = tableToMarkdown('CrowdStrike Falcon scripts', scripts)
    entry_context = {
        'CrowdStrike.Script(val.ID === obj.ID)': scripts
    }
    return create_entry_object(contents=response, ec=entry_context, hr=human_readable)


def upload_file_command():
    """Uploads a put-file from a war-room entry."""
    entry_id = demisto.args().get('entry_id')
    description = demisto.args().get('description', 'File uploaded from Demisto')
    response, file_name = upload_file(entry_id, description)
    return create_entry_object(contents=response, hr='File was uploaded successfully')


def delete_file_command():
    """Deletes a put-file by its ID."""
    file_id = demisto.args().get('file_id')
    response = delete_file(file_id)
    return create_entry_object(contents=response, hr=f'File {file_id} was deleted successfully')


def get_file_command():
    """Retrieves a put-file's metadata and, when present, its content as a file result."""
    file_id = argToList(demisto.args().get('file_id'))
    response = get_file(file_id)
    resources: list = response.get('resources', [])
    if resources and isinstance(resources, list):
        # will always be a list of one resource
        resource = resources[0]
        file_ = {
            'ID': resource.get('id'),
            'CreatedBy': resource.get('created_by'),
            'CreatedTime': resource.get('created_timestamp'),
            'Description': resource.get('description'),
            'Type': resource.get('file_type'),
            'ModifiedBy': resource.get('modified_by'),
            'ModifiedTime': resource.get('modified_timestamp'),
            'Name': resource.get('name'),
            'Permission': resource.get('permission_type'),
            'SHA256': resource.get('sha256'),
        }
        # Standard XSOAR File indicator context alongside the integration context.
        file_standard_context = {
            'Type': resource.get('file_type'),
            'Name': resource.get('name'),
            'SHA256': resource.get('sha256'),
            'Size': resource.get('size'),
        }
        human_readable = tableToMarkdown(f'CrowdStrike Falcon file {file_id}', file_)
        entry_context = {
            'CrowdStrike.File(val.ID === obj.ID)': file_,
            outputPaths['file']: file_standard_context
        }
        file_content = resource.get('content')
        if file_content:
            demisto.results(
                fileResult(
                    resource.get('name'),
                    file_content
                )
            )
        return create_entry_object(contents=response, ec=entry_context, hr=human_readable)
    else:
        return 'No file found.'


def list_files_command():
    """Lists the put-files available to the user for the RTR `put` command."""
    response = list_files()
    resources: list = response.get('resources', [])
    files_output = []
    file_standard_context = []
    for resource in resources:
        files_output.append({
            'ID': resource.get('id'),
            'CreatedBy': resource.get('created_by'),
            'CreatedTime': resource.get('created_timestamp'),
            'Description': resource.get('description'),
            'Type': resource.get('file_type'),
            'ModifiedBy': resource.get('modified_by'),
            'ModifiedTime': resource.get('modified_timestamp'),
            'Name': resource.get('name'),
            'Permission': resource.get('permission_type'),
            'SHA256': resource.get('sha256'),
        })
        file_standard_context.append({
            'Type': resource.get('file_type'),
            'Name': resource.get('name'),
            'SHA256': resource.get('sha256'),
            'Size': resource.get('size'),
        })
    human_readable = tableToMarkdown('CrowdStrike Falcon files', files_output)
    entry_context = {
        'CrowdStrike.File(val.ID === obj.ID)': files_output,
        outputPaths['file']: file_standard_context
    }
    return create_entry_object(contents=response,
                               ec=entry_context, hr=human_readable)


def run_script_command():
    """
    Runs an RTR script (an uploaded cloud file or inline raw script — exactly one
    of the two) on the given hosts via the admin batch command endpoint.
    """
    args = demisto.args()
    script_name = args.get('script_name')
    raw = args.get('raw')
    host_ids = argToList(args.get('host_ids'))
    if script_name and raw:
        raise ValueError('Only one of the arguments script_name or raw should be provided, not both.')
    elif not script_name and not raw:
        raise ValueError('One of the arguments script_name or raw must be provided, none given.')
    elif script_name:
        full_command = f'runscript -CloudFile={script_name}'
    elif raw:
        full_command = f'runscript -Raw=```{raw}```'
    command_type = 'runscript'
    response = run_batch_admin_cmd(host_ids, command_type, full_command)
    resources: dict = response.get('combined', {}).get('resources', {})
    output = []
    for _, resource in resources.items():
        errors = resource.get('errors', [])
        if errors:
            error_message = errors[0].get('message', '')
            if not error_message:
                error_message = f'Could not run command\n{errors}'
            return_error(error_message)
        # Strip the backticks used to delimit raw scripts before echoing the command back.
        full_command = full_command.replace('`', '')
        output.append({
            'HostID': resource.get('aid'),
            'SessionID': resource.get('session_id'),
            'Stdout': resource.get('stdout'),
            'Stderr': resource.get('stderr'),
            'BaseCommand': resource.get('base_command'),
            'Command': full_command
        })
    human_readable = tableToMarkdown(f'Command {full_command} results', output)
    entry_context = {
        'CrowdStrike': {
            'Command': output
        }
    }
    return create_entry_object(contents=response, ec=entry_context, hr=human_readable)


def run_get_command():
    """
    Requests a file from one or more hosts via the RTR batch `get` command;
    the actual file is retrieved later with the status-get command.
    """
    args = demisto.args()
    host_ids = argToList(args.get('host_ids'))
    file_path = args.get('file_path')
    optional_hosts = argToList(args.get('optional_hosts'))
    timeout = args.get('timeout')
    timeout_duration = args.get('timeout_duration')
    timeout = timeout and int(timeout)
    response = run_batch_get_cmd(host_ids, file_path, optional_hosts, timeout, timeout_duration)
    resources: dict = response.get('combined', {}).get('resources', {})
    output = []
    for _, resource in resources.items():
        errors = resource.get('errors', [])
        if
           errors:
            error_message = errors[0].get('message', '')
            if not error_message:
                error_message = f'Could not get command\n{errors}'
            return_error(error_message)
        output.append({
            'HostID': resource.get('aid'),
            'Stdout': resource.get('stdout'),
            'Stderr': resource.get('stderr'),
            'BaseCommand': resource.get('base_command'),
            'TaskID': resource.get('task_id'),
            'GetRequestID': response.get('batch_get_cmd_req_id'),
            'Complete': resource.get('complete') or False,
            'FilePath': file_path
        })
    human_readable = tableToMarkdown(f'Get command has requested for a file {file_path}', output)
    entry_context = {
        'CrowdStrike.Command(val.TaskID === obj.TaskID)': output
    }
    return create_entry_object(contents=response, ec=entry_context, hr=human_readable)


def status_get_command():
    """
    Polls the status of previous batch `get` requests and reports the files
    that were collected (ID/SHA256/size) for each request ID.
    """
    args = demisto.args()
    request_ids = argToList(args.get('request_ids'))
    timeout = args.get('timeout')
    timeout_duration = args.get('timeout_duration')
    timeout = timeout and int(timeout)
    responses = []
    files_output = []
    file_standard_context = []
    for request_id in request_ids:
        response = status_get_cmd(request_id, timeout, timeout_duration)
        responses.append(response)
        resources: dict = response.get('resources', {})
        for _, resource in resources.items():
            errors = resource.get('errors', [])
            if errors:
                error_message = errors[0].get('message', '')
                if not error_message:
                    error_message = f'Could not get command\n{errors}'
                return_error(error_message)
            files_output.append({
                'ID': resource.get('id'),
                'TaskID': resource.get('cloud_request_id'),
                'CreatedAt': resource.get('created_at'),
                'DeletedAt': resource.get('deleted_at'),
                'UpdatedAt': resource.get('updated_at'),
                'Name': resource.get('name'),
                'Size': resource.get('size'),
                'SHA256': resource.get('sha256')
            })
            file_standard_context.append({
                'Name': resource.get('name'),
                'SHA256': resource.get('sha256'),
                'Size': resource.get('size'),
            })
    human_readable = tableToMarkdown('CrowdStrike Falcon files', files_output)
    entry_context = {
        'CrowdStrike.File(val.ID === obj.ID || val.TaskID ===
            obj.TaskID)': files_output,
        outputPaths['file']: file_standard_context
    }
    if len(responses) == 1:
        return create_entry_object(contents=responses[0], ec=entry_context, hr=human_readable)
    else:
        return create_entry_object(contents=response, ec=entry_context, hr=human_readable)


def status_command():
    """
    Polls the status of a single RTR command execution at the requested scope,
    returning stdout/stderr so far plus the next sequence ID for continued polling.
    """
    args = demisto.args()
    request_id = args.get('request_id')
    sequence_id = args.get('sequence_id')
    scope = args.get('scope', 'read')
    sequence_id = None if sequence_id is None else int(sequence_id)
    if scope == 'read':
        response = status_read_cmd(request_id, sequence_id)
    elif scope == 'write':
        response = status_write_cmd(request_id, sequence_id)
    else:  # scope = admin
        response = status_admin_cmd(request_id, sequence_id)
    resources: list = response.get('resources', [])
    output = []
    for resource in resources:
        errors = resource.get('errors', [])
        if errors:
            error_message = errors[0].get('message', '')
            if not error_message:
                error_message = f'Could not run command\n{errors}'
            return_error(error_message)
        sequence_id = int(resource.get('sequence_id', 0))
        output.append({
            'Complete': resource.get('complete') or False,
            'Stdout': resource.get('stdout'),
            'Stderr': resource.get('stderr'),
            'BaseCommand': resource.get('base_command'),
            'TaskID': resource.get('task_id'),
            'SequenceID': sequence_id,
            'NextSequenceID': sequence_id + 1
        })
    human_readable = tableToMarkdown('Command status results', output, removeNull=True)
    entry_context = {
        'CrowdStrike.Command(val.TaskID === obj.TaskID)': output
    }
    return create_entry_object(contents=response, ec=entry_context, hr=human_readable)


def get_extracted_file_command():
    """
    Downloads a file previously extracted from a host (by SHA256) and returns it
    as a war-room file result; the payload arrives as a 7z archive.
    """
    args = demisto.args()
    host_id = args.get('host_id')
    sha256 = args.get('sha256')
    filename = args.get('filename')
    response = get_extracted_file(host_id, sha256, filename)

    # save an extracted file
    content_type = response.headers.get('Content-Type', '').lower()
    if content_type == 'application/x-7z-compressed':
        content_disposition = response.headers.get('Content-Disposition', '').lower()
        if
content_disposition: filename = email.message_from_string(f'Content-Disposition: {content_disposition}\n\n').get_filename() if not filename: sha256 = sha256 or hashlib.sha256(response.content).hexdigest() filename = sha256.lower() + '.7z' return fileResult(filename, response.content) return_error('An extracted file is missing in the response') def list_host_files_command(): args = demisto.args() host_id = args.get('host_id') response = list_host_files(host_id) resources: list = response.get('resources', []) files_output = [] file_standard_context = [] command_output = [] for resource in resources: errors = resource.get('errors', []) if errors: error_message = errors[0].get('message', '') if not error_message: error_message = f'Could not run command\n{errors}' return_error(error_message) command_output.append({ 'HostID': host_id, 'TaskID': resource.get('cloud_request_id'), 'SessionID': resource.get('session_id') }) files_output.append({ 'ID': resource.get('id'), 'CreatedAt': resource.get('created_at'), 'DeletedAt': resource.get('deleted_at'), 'UpdatedAt': resource.get('updated_at'), 'Name': resource.get('name'), 'SHA256': resource.get('sha256'), 'Size': resource.get('size'), 'Stdout': resource.get('stdout'), 'Stderr': resource.get('stderr') }) file_standard_context.append({ 'Name': resource.get('name'), 'SHA256': resource.get('sha256'), 'Size': resource.get('size'), }) if files_output: human_readable = tableToMarkdown('CrowdStrike Falcon files', files_output) else: human_readable = 'No result found' entry_context = { 'CrowdStrike.Command(val.TaskID === obj.TaskID)': command_output, 'CrowdStrike.File(val.ID === obj.ID)': files_output, outputPaths['file']: file_standard_context } return create_entry_object(contents=response, ec=entry_context, hr=human_readable) def refresh_session_command(): args = demisto.args() host_id = args.get('host_id') response = refresh_session(host_id) resources: list = response.get('resources', []) session_id = None for resource in 
resources: errors = resource.get('errors', []) if errors: error_message = errors[0].get('message', '') if not error_message: error_message = f'Could not run command\n{errors}' return_error(error_message) session_id = resource.get('session_id') return create_entry_object(contents=response, hr=f'CrowdStrike Session Refreshed: {session_id}') def build_url_filter_for_device_id(args): indicator_type = args.get('type') indicator_value = args.get('value') url_filter = f'/indicators/queries/devices/v1?type={indicator_type}&value={indicator_value}' return url_filter def build_error_message(raw_res): if raw_res.get('errors'): error_data = raw_res.get('errors')[0] else: error_data = {"code": 'None', "message": 'something got wrong, please try again'} error_code = error_data.get('code') error_message = error_data.get('message') return f'Error: error code: {error_code}, error_message: {error_message}.' def validate_response(raw_res): return 'resources' in raw_res.keys() def get_indicator_device_id(): args = demisto.args() url_filter = build_url_filter_for_device_id(args) raw_res = http_request('GET', url_filter) context_output = '' if validate_response(raw_res): context_output = raw_res.get('resources') else: error_message = build_error_message(raw_res) return_error(error_message) return CommandResults( readable_output=context_output, outputs_prefix='CrowdStrike.DeviceID', outputs_key_field='DeviceID', outputs=context_output ) def detections_to_human_readable(detections): detections_readable_outputs = [] for detection in detections: readable_output = assign_params(status=detection.get('status'), max_severity=detection.get('max_severity_displayname'), detection_id=detection.get('detection_id'), created_time=detection.get('created_timestamp')) detections_readable_outputs.append(readable_output) headers = ['detection_id', 'created_time', 'status', 'max_severity'] human_readable = tableToMarkdown('CrowdStrike Detections', detections_readable_outputs, headers, removeNull=True) 
return human_readable def list_detection_summaries_command(): fetch_query = demisto.args().get('fetch_query') if fetch_query: fetch_query = "{query}".format(query=fetch_query) detections_ids = demisto.get(get_fetch_detections(filter_arg=fetch_query), 'resources') else: detections_ids = demisto.get(get_fetch_detections(), 'resources') detections_response_data = get_detections_entities(detections_ids) detections = [resource for resource in detections_response_data.get('resources')] detections_human_readable = detections_to_human_readable(detections) return CommandResults( readable_output=detections_human_readable, outputs_prefix='CrowdStrike.Detections', outputs_key_field='detection_id', outputs=detections ) def incidents_to_human_readable(incidents): incidents_readable_outputs = [] for incident in incidents: readable_output = assign_params(description=incident.get('description'), state=incident.get('state'), name=incident.get('name'), tags=incident.get('tags'), incident_id=incident.get('incident_id'), created_time=incident.get('created')) incidents_readable_outputs.append(readable_output) headers = ['incident_id', 'created_time', 'name', 'description', 'state', 'tags'] human_readable = tableToMarkdown('CrowdStrike Incidents', incidents_readable_outputs, headers, removeNull=True) return human_readable def list_incident_summaries_command(): fetch_query = demisto.args().get('fetch_query') if fetch_query: fetch_query = "{query}".format(query=fetch_query) incidents_ids = get_incidents_ids(filter_arg=fetch_query) else: incidents_ids = get_incidents_ids() incidents_response_data = get_incidents_entities(incidents_ids) incidents = [resource for resource in incidents_response_data.get('resources')] incidents_human_readable = detections_to_human_readable(incidents) return CommandResults( readable_output=incidents_human_readable, outputs_prefix='CrowdStrike.Incidents', outputs_key_field='incident_id', outputs=incidents ) def test_module(): try: get_token(new_token=True) except 
ValueError: return 'Connection Error: The URL or The API key you entered is probably incorrect, please try again.' if demisto.params().get('isFetch'): try: fetch_incidents() except ValueError: return 'Error: Something is wrong with the filters you entered for the fetch incident, please try again.' return 'ok' ''' COMMANDS MANAGER / SWITCH PANEL ''' def main(): LOG('Command being called is {}'.format(demisto.command())) # should raise error in case of issue if demisto.command() == 'fetch-incidents': demisto.incidents(fetch_incidents()) try: if demisto.command() == 'test-module': result = test_module() return_results(result) get_token(new_token=True) demisto.results('ok') elif demisto.command() == 'cs-device-ran-on': return_results(get_indicator_device_id()) elif demisto.command() == 'cs-falcon-search-device': demisto.results(search_device_command()) elif demisto.command() == 'cs-falcon-get-behavior': demisto.results(get_behavior_command()) elif demisto.command() == 'cs-falcon-search-detection': demisto.results(search_detections_command()) elif demisto.command() == 'cs-falcon-resolve-detection': demisto.results(resolve_detection_command()) elif demisto.command() == 'cs-falcon-contain-host': demisto.results(contain_host_command()) elif demisto.command() == 'cs-falcon-lift-host-containment': demisto.results(lift_host_containment_command()) elif demisto.command() == 'cs-falcon-run-command': demisto.results(run_command()) elif demisto.command() == 'cs-falcon-upload-script': demisto.results(upload_script_command()) elif demisto.command() == 'cs-falcon-get-script': demisto.results(get_script_command()) elif demisto.command() == 'cs-falcon-delete-script': demisto.results(delete_script_command()) elif demisto.command() == 'cs-falcon-list-scripts': demisto.results(list_scripts_command()) elif demisto.command() == 'cs-falcon-upload-file': demisto.results(upload_file_command()) elif demisto.command() == 'cs-falcon-delete-file': demisto.results(delete_file_command()) elif 
demisto.command() == 'cs-falcon-get-file': demisto.results(get_file_command()) elif demisto.command() == 'cs-falcon-list-files': demisto.results(list_files_command()) elif demisto.command() == 'cs-falcon-run-script': demisto.results(run_script_command()) elif demisto.command() == 'cs-falcon-run-get-command': demisto.results(run_get_command()) elif demisto.command() == 'cs-falcon-status-get-command': demisto.results(status_get_command()) elif demisto.command() == 'cs-falcon-status-command': demisto.results(status_command()) elif demisto.command() == 'cs-falcon-get-extracted-file': demisto.results(get_extracted_file_command()) elif demisto.command() == 'cs-falcon-list-host-files': demisto.results(list_host_files_command()) elif demisto.command() == 'cs-falcon-refresh-session': demisto.results(refresh_session_command()) elif demisto.command() == 'cs-falcon-list-detection-summaries': return_results(list_detection_summaries_command()) elif demisto.command() == 'cs-falcon-list-incident-summaries': return_results(list_incident_summaries_command()) # Log exceptions except Exception as e: return_error(str(e)) if __name__ in ('__main__', 'builtin', 'builtins'): main()
from flask import Flask, request as flaskRequest, jsonify, render_template
import requests
import json
import time
from datetime import datetime, timedelta
import sqlite3
import sys
import ccxt
import os
from threading import Thread
from pathlib import Path
import logging

# Set up the logging module for file output.
log = logging.getLogger('spotbit')
log.setLevel(logging.DEBUG)
logFileHandler = logging.FileHandler('/home/spotbit/.spotbit/spotbit.log')
logFileHandler.setLevel(logging.DEBUG)
log.addHandler(logFileHandler)

# Config settings
allowedFields = ["keepWeeks", "exchanges", "currencies", "interval", "exchange_limit",
                 "averaging_time", "historicalExchanges", "historyEnd"]
configPath = Path("/home/spotbit/.spotbit/spotbit.config")

# Default values; these will be overwritten when the config file is read.
exchanges = []
historicalExchanges = []   # exchanges that we want the history of
currencies = []
interval = 10              # time to wait between GET requests to servers, to avoid ratelimits
keepWeeks = 3              # how many weeks of data to keep in the database
exchange_limit = 200       # when there are more exchanges than this, multithreading is ideal
performance_mode = False
averaging_time = 1         # the number of hours that we should average information over
historyEnd = 0
on_demand = False          # whether or not we are caching data
score = 0                  # the current percent of empty tables

# Information regarding the current worker threads.
threadResults = None

# Curated exchange lists for creating averages.
curated_exchanges = {'USD': ['coinbasepro', 'okcoin', 'bitfinex', 'kraken', 'bitstamp'],
                     'GBP': ['coinbasepro', 'coinsbank', 'bitstamp', 'kraken', 'cexio'],
                     'EUR': ['kraken', 'coinbasepro', 'bitstamp', 'bitfinex', 'indoex'],
                     'JPY': ['bitflyer', 'liquid', 'coincheck', 'bitbank', 'zaif'],
                     'USDT': ['binance', 'okex', 'huobipro', 'bitmax', 'gateio']}
curated_exchanges_list = ['gemini', 'bitstamp', 'okcoin', 'coinsbit', 'coinbasepro',
                          'coinsbank', 'kraken', 'cexio', 'bitfinex', 'indoex',
                          'bitflyer', 'liquid', 'coincheck', 'bitbank', 'zaif',
                          'hitbtc', 'binance', 'okex', 'gateio', 'bitmax']
curated_currencies = ['USD', 'GBP', 'EUR', 'JPY', 'AUD', 'USDT']

p = Path("/home/spotbit/.spotbit/sb.db")
db = sqlite3.connect(p)
print(f"db opened in {p}")
log.debug(f"db opened in {p}")

ONION = ""
try:
    ONION = os.environ["ONION"]  # get this value from the environment
    print(f"spotbit is running at {ONION}")
except Exception as e:
    print(f"cant find ONION in PATH {e}")


# Database configuration.
# We need to have the database opened manually once so that systemd can access it.
def configure_db():
    """Open the sqlite database once so the systemd unit can access it."""
    p = Path("/home/spotbit/.spotbit/sb.db")
    db = sqlite3.connect(p)
    print(f"db opened in {p}")
    log.debug(f"db opened in {p}")


app = Flask(__name__)


def optimize_chunks(cpuOffset):
    """Split the number of exchanges per chunk based on available cpu cores.

    cpuOffset: the number of cores you want to try and utilize.
    """
    return int(len(exchanges) / (os.cpu_count() - cpuOffset))


# Names of every exchange we instantiate; each string matches the ccxt
# attribute of the same name.
# Unsupported exchanges: bitvaro phemex vaultoro
# btctradeim doesn't want to work on raspberry pi
_SUPPORTED_EXCHANGE_NAMES = (
    "acx", "aofex", "bequant", "bibox", "bigone", "binance", "bitbank", "bitbay",
    "bitfinex", "bitflyer", "bitforex", "bithumb", "bitkk", "bitmax", "bitstamp",
    "bittrex", "bitz", "bl3p", "bleutrade", "braziliex", "btcalpha", "btcbox",
    "btcmarkets", "btctradeua", "bw", "bybit", "bytetrade", "cex", "chilebit",
    "coinbase", "coinbasepro", "coincheck", "coinegg", "coinex", "coinfalcon",
    "coinfloor", "coinmate", "coinone", "crex24", "currencycom", "digifinex",
    "dsx", "eterbase", "exmo", "exx", "foxbit", "ftx", "gateio", "gemini",
    "hbtc", "hitbtc", "hollaex", "huobipro", "ice3x", "independentreserve",
    "indodax", "itbit", "kraken", "kucoin", "lakebtc", "latoken", "lbank",
    "liquid", "luno", "lykke", "mercado", "oceanex", "okcoin", "okex", "paymium",
    "poloniex", "probit", "southxchange", "stex", "surbitcoin", "therock",
    "tidebit", "tidex", "upbit", "vbtc", "wavesexchange", "whitebit", "yobit",
    "zaif", "zb",
)


def init_supported_exchanges():
    """Instantiate a ccxt client for every supported exchange.

    Returns a dict mapping exchange id -> ccxt exchange object. The API will
    query a subset of these exchanges based on what the user has specified.
    """
    return {name: getattr(ccxt, name)() for name in _SUPPORTED_EXCHANGE_NAMES}


def is_supported(exchange):
    """Check if a given exchange is in the list of supported exchanges."""
    try:
        return ex_objs[exchange] is not None
    except Exception as e:
        print(f"caught an error: {e}")
        log.error(f"caught an error {e}")
        return False


def is_ms(timestamp):
    """Check if a timestamp has millisecond precision (divisible by 1000)."""
    return timestamp % 1000 == 0


# We create a list of all exchanges to do error checking on user input.
ex_objs = init_supported_exchanges()
num_exchanges = len(ex_objs)
print(f"created list of {num_exchanges}")
log.info(f"created list of {num_exchanges}")


@app.route('/')
def index():
    """Render the landing page with example API URLs."""
    date_start = (datetime.now() - timedelta(days=5)).timestamp() * 1e3
    date_end = (datetime.now()).timestamp() * 1e3
    f0 = f"{ONION}/now/USD/coinbasepro"
    f1 = f"{ONION}/now/USD"
    f2 = f"{ONION}/hist/USD/coinbasepro/{date_start}/{date_end}"
    f3 = f"{ONION}/configure"
    return render_template('index.html', fetch_0=f0, fetch_1=f1, fetch_2=f2, fetch_3=f3,
                           date_start=date_start, date_end=date_end)


# TODO: create an html page to render here
@app.route('/status')
def status():
    """Report server liveness; in performance mode also list worker threads."""
    global score
    global threadResults
    if performance_mode:
        content = f"Threads: {len(threadResults)}"
        # BUGFIX: the original appended to an undefined variable `html`
        # (NameError); accumulate into `content` instead.
        for chunk, thread in threadResults:
            content += f"{chunk} at memory address: {thread}"
        return f"<html><p>{content}</p></html>"
    else:
        return "server is running"


# Configure the settings of Spotbit while the server is still running.
# Send a GET request to this route to view current settings.
# Send a POST request to this route with settings fields stored in JSON to update settings.
# TODO: make the updates persistent by also writing them to file.
@app.route('/configure', methods=['GET', 'POST'])
def configure():
    # seems like this needs to be done in order to reference global vars
    # inside of the flask server thread
    global keepWeeks
    global currencies
    global exchanges
    global interval
    global on_demand
    if flaskRequest.method == 'POST':
        # BUGFIX: request.json is a dict, not a callable — the original
        # flaskRequest.json("keepWeeks") raised TypeError. Use .get() with the
        # current value as default so partial updates don't wipe settings.
        # TODO: implement a form here.
        keepWeeks = flaskRequest.json.get('keepWeeks', keepWeeks)
        exchanges = flaskRequest.json.get('exchanges', exchanges)
        currencies = flaskRequest.json.get('currencies', currencies)
        interval = flaskRequest.json.get('interval', interval)
        return {'updated settings?': 'yes', 'keepWeeks': keepWeeks, 'currencies': currencies,
                'exchanges': exchanges, 'interval': interval}
    else:
        return {'updated settings?': 'no', 'keepWeeks': keepWeeks, 'currencies': currencies,
                'on demand exchanges': list(ex_objs.keys()), 'cached exchanges': exchanges,
                'interval': interval}


def average_price_value(tuple_list, tuple_length, ticker):
    """Average the OHLCV columns of a list of database rows.

    Rows are (id, timestamp, datetime, pair, open, high, low, close, volume);
    only the numeric columns (index 4 and up) are averaged. Also reports the
    oldest timestamp seen so the caller knows how stale the average is.
    Raises ZeroDivisionError when tuple_list is empty (caller guarantees
    at least one row).
    """
    running_sums = [0] * tuple_length
    oldest_timestamp = 1e13
    for tup in tuple_list:
        if tup is not None and tup[1] < oldest_timestamp:
            oldest_timestamp = tup[1]
        # Indices 0-3 are id/timestamp/datetime/pair — skip them.
        for i in range(tuple_length):
            if i > 3:
                running_sums[i] += tup[i]
    list_len = len(tuple_list)
    return {'id': 'average_value', 'timestamp': (datetime.now()).timestamp() * 1e3,
            'datetime': datetime.now(), 'oldest_timestamp': oldest_timestamp,
            'currency_pair': ticker, 'open': running_sums[4] / list_len,
            'high': running_sums[5] / list_len, 'low': running_sums[6] / list_len,
            'close': running_sums[7] / list_len, 'volume': running_sums[8] / list_len}


# Route for when a call is made without specifying an exchange:
# return an average of the 5 curated exchanges for that currency.
@app.route('/now/<currency>')
def now_noex(currency):
    """Average the most recent prices of the curated exchanges for <currency>.

    NOTE(review): currencies without a curated list fall through and return
    None (a Flask error) — confirm whether a JSON error body is preferred.
    """
    global averaging_time
    db_n = sqlite3.connect(p, timeout=30)
    currency = currency.upper()
    ticker = f"BTC-{currency}"
    # only calculate averages if a list has been curated already
    if currency in curated_currencies:
        components = curated_exchanges[currency]
        failed_exchanges = []
        components_list = []
        for exchange in components:
            ms_check = f"SELECT timestamp FROM {exchange} LIMIT 1;"
            cursor = db_n.execute(ms_check)
            res = cursor.fetchone()
            # only take values from within the averaging window
            ts_delta = (datetime.now() - timedelta(hours=averaging_time)).timestamp()
            if res is not None and is_ms(int(res[0])):
                ts_delta *= 1e3
            statement = f"SELECT * FROM {exchange} WHERE pair = '{ticker}' AND timestamp > {ts_delta} ORDER BY timestamp DESC LIMIT 1;"
            cursor = db_n.execute(statement)
            res = cursor.fetchone()
            if res is not None:
                components_list.append(res)
            else:
                # if there is no data in the table yet, then try a direct request
                res = fallback_to_direct(exchange, currency, db_n)
                # NOTE(review): len() < 2 is a loose check — fallback can also
                # return an error string, which would slip through; confirm.
                if len(res) < 2:
                    log.error(f"could not get data from {exchange}")
                    failed_exchanges.append(exchange)
                else:
                    components_list.append(res)
        result = average_price_value(components_list, 9, ticker)
        result['exchanges'] = components
        result['failed_exchanges'] = failed_exchanges
        return result
@app.route('/now/<currency>/<exchange>')
def now(currency, exchange):
    """Return the latest price entry in the database.

    currency: the three letter base currency desired; must be a currency
    already being collected.
    exchange: the exchange to query from the local database, or "all" to
    average the latest close across every configured exchange.
    """
    db_n = sqlite3.connect(p, timeout=30)
    ticker = "BTC-{}".format(currency.upper())
    if exchange in exchanges:  # the exchange is already in the config file
        # pair value is bound as a parameter; the table name is safe because
        # `exchange` was validated against the configured list above.
        statement = f"SELECT * FROM {exchange} WHERE pair = ? ORDER BY timestamp DESC LIMIT 1;"
        try:
            cursor = db_n.execute(statement, (ticker,))
            res = cursor.fetchone()
        except sqlite3.OperationalError:
            print("database is locked. Cannot access it")
            log.error("database is locked. Cannot access it")
            return {'err': 'database locked'}
        if res is not None:
            db_n.close()
            return {'id': res[0], 'timestamp': res[1], 'datetime': res[2],
                    'currency_pair': res[3], 'open': res[4], 'high': res[5],
                    'low': res[6], 'close': res[7], 'vol': res[8]}
        else:
            db_n.close()
            return fallback_to_direct(exchange, currency, db_n)
    elif exchange == "all":
        # select from all exchanges and average the latest close
        result_set = []
        for e in exchanges:
            ts_cutoff = (datetime.now() - timedelta(hours=averaging_time)).timestamp()
            check_ms = f"SELECT timestamp FROM {e} LIMIT 1;"
            cursor = db_n.execute(check_ms)
            db_n.commit()
            ts = cursor.fetchone()
            if ts is not None and is_ms(int(ts[0])):
                print(f"using millisecond precision for {e}")
                logging.info(f"using millisecond precision for {e}")
                ts_cutoff *= 1e3
            # BUGFIX: the original ordered ASC, which picks the OLDEST row in
            # the window rather than the latest close; order DESC instead.
            statement = f"SELECT timestamp, close FROM {e} WHERE timestamp > ? AND pair = ? ORDER BY timestamp DESC LIMIT 1;"
            cursor = db_n.execute(statement, (ts_cutoff, ticker))
            db_n.commit()
            result = cursor.fetchone()
            if result is not None:
                result_set.append(result[1])
        return {'ticker': list_mean(result_set)}
    else:
        return fallback_to_direct(exchange, currency, db_n)


def fallback_to_direct(exchange, currency, db_n):
    """Directly request an exchange that is supported but whose table is empty.

    NOTE(review): db_n is accepted but unused here (the close was commented
    out upstream) — kept for interface compatibility.
    """
    res = request_single(exchange, currency)
    if res is not None:
        return res
    return {'id': res}


def list_mean(input_list):
    """Arithmetic mean of a list of numbers; 0.0 for an empty list.

    BUGFIX: the original raised ZeroDivisionError when no exchange had data
    inside the averaging window.
    """
    if not input_list:
        return 0.0
    return sum(input_list) / len(input_list)


# Get data from local storage inside of a certain range.
# Parameters:
#   currency: the fiat base currency to fetch data for (three letter code).
#   exchange: the exchange to get data from.
#   date_start / date_end: date_start is the oldest time value in the range
#   desired. Either millisecond timestamps or "YYYY-MM-DDTHH:mm:SS".
@app.route('/hist/<currency>/<exchange>/<date_start>/<date_end>', methods=['GET'])
def hist(currency, exchange, date_start, date_end):
    """Return all rows for BTC-<currency> on <exchange> between two dates."""
    db_n = sqlite3.connect(p, timeout=10)
    ticker = "BTC-{}".format(currency.upper())
    # check what format of dates we have
    if (str(date_start)).isdigit():
        date_s = int(date_start)
        date_e = int(date_end)
    else:
        # error checking for malformed dates
        try:
            date_s = (datetime.fromisoformat(date_start.replace("T", " "))).timestamp() * 1000
            date_e = (datetime.fromisoformat(date_end.replace("T", " "))).timestamp() * 1000
        except Exception:
            return "malformed dates. Provide both dates in the same format: use YYYY-MM-DDTHH:mm:SS or millisecond timestamps"
    # check the table we want to select from to see the precision of it
    check = f"SELECT timestamp FROM {exchange} ORDER BY timestamp DESC LIMIT 1;"
    cursor = db_n.execute(check)
    ts = cursor.fetchone()
    if ts is None or not is_ms(int(ts[0])):
        # for some exchanges we cannot use ms precision timestamps (such as coinbase)
        date_s /= 1e3
        date_e /= 1e3
    # values are bound as parameters (currency/dates come from the URL).
    statement = f"SELECT * FROM {exchange} WHERE timestamp > ? AND timestamp < ? AND pair = ?;"
    # keep trying in case of database locked error
    while True:
        try:
            cursor = db_n.execute(statement, (date_s, date_e, ticker))
            break
        except sqlite3.OperationalError:
            time.sleep(5)
    res = cursor.fetchall()
    db_n.close()
    return {'columns': ['id', 'timestamp', 'datetime', 'currency_pair', 'open', 'high', 'low', 'close', 'vol'],
            'data': res}


# Return all database rows within `tolerance` for each of the supplied dates.
# Dates should be provided as millisecond timestamps separated by hyphens.
@app.route('/hist/<currency>/<exchange>/<dates>')
def hist_single_dates(currency, exchange, dates):
    """Return the first row inside a +/- `tolerance` minute window around
    each hyphen-separated millisecond timestamp in `dates`."""
    db_n = sqlite3.connect(p, timeout=10)
    ticker = "BTC-{}".format(currency.upper())
    dates_list = dates.split("-")
    # the number of minutes away from a given date that is considered acceptable
    tolerance = 30
    results = {}
    check_ms = f"SELECT timestamp FROM {exchange} LIMIT 1;"
    cursor = db_n.execute(check_ms)
    ts = cursor.fetchone()
    ms_precision = True
    if ts is not None and not is_ms(int(ts[0])):
        ms_precision = False
    for d in dates_list:
        try:
            ts = int(d)
        except Exception:
            return f"malformed date {d}"
        dt = datetime.fromtimestamp(ts / 1e3)
        lower_bound = (dt - timedelta(minutes=tolerance)).timestamp() * 1e3
        upper_bound = (dt + timedelta(minutes=tolerance)).timestamp() * 1e3
        # NOTE(review): the original divided `ts` by 1e3 for second-precision
        # tables but never rescaled the bounds (and never used ts again);
        # the bounds are left in ms here — confirm against table contents.
        if not ms_precision:
            lower_bound /= 1e3
            upper_bound /= 1e3
        # BUGFIX: the original filtered `timestamp > upper_bound` (inverted
        # comparison), selecting everything AFTER the window instead of the
        # rows inside it.
        statement = f"SELECT * FROM {exchange} WHERE pair = ? AND timestamp > ? AND timestamp < ? ORDER BY timestamp ASC;"
        while True:
            try:
                cursor = db_n.execute(statement, (ticker, lower_bound, upper_bound))
                rows = cursor.fetchall()
                # BUGFIX: the original indexed [0] unconditionally, raising
                # IndexError when no row fell in the window.
                res = rows[0] if rows else None
                break
            except sqlite3.OperationalError:
                time.sleep(2)
        if res is not None:
            results[f"{d}"] = {'id': res[0], 'timestamp': res[1], 'datetime': res[2],
                               'pair': res[3], 'open': res[4], 'high': res[5],
                               'low': res[6], 'close': res[7], 'vol': res[8]}
        else:
            results[f"{d}"] = None
    return results


# Per-exchange overrides for the OHLCV timeframe and candle limit; everything
# else gets the defaults ('1m', 1000).
_OHLCV_TIMEFRAMES = {'bleutrade': '1h', 'btcalpha': '1h', 'rightbtc': '1h',
                     'hollaex': '1h', 'poloniex': '5m'}
_OHLCV_LIMITS = {'bitstamp': 1000, 'bybit': 200, 'eterbase': 1000000,
                 'exmo': 3000, 'btcalpha': 720}


def request_single(exchange, currency):
    """Make a single on-demand request for BTC/<currency> from one exchange.

    This is intended for when the user requests an exchange in /now that is
    not present in the database. It will probably not be used for /hist
    because of the length of time getting arbitrary amounts of historical
    data can be.
    """
    if not is_supported(exchange):
        return f"{exchange} is not supported by CCXT"
    obj = ex_objs[exchange]
    ticker = "BTC/{}".format(currency.upper())
    dt = None
    result = None  # BUGFIX: initialize so a failed fetch returns "no data" instead of NameError
    if obj.has['fetchOHLCV']:
        tframe = _OHLCV_TIMEFRAMES.get(exchange, '1m')
        lim = _OHLCV_LIMITS.get(exchange, 1000)
        if exchange == "bitfinex":
            # bitfinex returns candles from the beginning of time unless the
            # window is pinned explicitly in params.
            # other exchanges requiring special conditions: bitstamp, bitmart
            params = {'limit': 100,
                      'start': round((datetime.now() - timedelta(hours=1)).timestamp() * 1000),
                      'end': round(datetime.now().timestamp() * 1000)}
            try:
                result = obj.fetch_ohlcv(symbol=ticker, timeframe=tframe, since=None, params=params)
            except Exception as e:
                print(f"got an error requesting info from {exchange}: {e}")
                logging.error(f"got an error requesting info frm {exchange}: {e}")
        else:
            try:
                result = obj.fetch_ohlcv(symbol=ticker, timeframe=tframe, since=None, limit=lim)
            except Exception as e:
                print(f"got an error requesting info from {exchange}: {e}")
                logging.error(f"got an error requesting info from {exchange}: {e}")
    else:
        try:
            result = obj.fetch_ticker(ticker)
            if result is not None and not is_ms(result['timestamp']):
                dt = datetime.fromtimestamp(result['timestamp'])
            else:
                dt = datetime.fromtimestamp(result['timestamp'] / 1e3)
            if result is not None:
                return {'close': result['close'], 'symbol': ticker,
                        'timestamp': result['timestamp'], 'datetime': dt,
                        'volume': result['bidVolume'], 'id': 'on_demand'}
        except Exception as e:
            print(f"got ratelimited on {e}")
            logging.error(f"got ratelimited on {e}")
        result = None  # ticker path either returned above or failed
    if result is not None:
        res = result[-1]
        if is_ms(res[0]):
            dt = datetime.fromtimestamp(res[0] / 1e3)
        else:
            dt = datetime.fromtimestamp(res[0])
        return {'id': 'on_demand', 'timestamp': res[0], 'datetime': dt,
                'currency_pair': ticker, 'open': res[1], 'high': res[2],
                'low': res[3], 'close': res[4], 'vol': res[5]}
    else:
        return "no data"
def request(exchanges,interval,db_n):
    """Poll every configured exchange/currency pair once and persist the results.

    Candle rows are inserted when the exchange supports OHLCV; otherwise a
    single ticker price is stored with zeroed OHLC columns. Sleeps `interval`
    seconds after each exchange/currency pair to respect rate limits.
    """
    global currencies
    for e in exchanges:
        for curr in currencies:
            ticker = "BTC/{}".format(curr)
            success = True
            if ex_objs[e].has['fetchOHLCV']:
                candle = None
                tframe = '1m'
                lim = 1000
                if e == "bleutrade" or e == "btcalpha" or e == "rightbtc" or e == "hollaex":
                    tframe = '1h'
                if e == "poloniex":
                    tframe = '5m'
                # some exchanges have explicit limits on how many candles you can get at once
                if e == "bitstamp":
                    lim = 1000
                if e == "bybit":
                    lim = 200
                if e == "eterbase":
                    lim = 1000000
                if e == "exmo":
                    lim = 3000
                if e == "btcalpha":
                    lim = 720
                if e == "bitfinex":
                    # bitfinex needs an explicit window or it returns candles from the beginning of time
                    params = {'limit':100, 'start':(round((datetime.now()-timedelta(hours=1)).timestamp()*1000)), 'end':round(datetime.now().timestamp()*1000)}
                    try:
                        candle = ex_objs[e].fetch_ohlcv(symbol=ticker, timeframe=tframe, since=None, params=params)
                        if candle == None:
                            raise Exception(f"candle from {e} is null")
                    except Exception as err: #figure out this error type
                        #the point so far is to gracefully handle the error, but waiting for the next cycle should be good enough
                        if "does not have" not in str(err):
                            print(f"error fetching candle: {e} {curr} {err}")
                            log.error(f"error fetching candle: {e} {curr} {err}")
                        success = False
                else:
                    try:
                        candle = ex_objs[e].fetch_ohlcv(symbol=ticker, timeframe=tframe, since=None, limit=lim) #'ticker' was listed as 'symbol' before | interval should be determined in the config file
                        if candle == None:
                            raise Exception(f"candle from {e} is nulll")
                    except Exception as err:
                        if "does not have" not in str(err):
                            print(f"error fetching candle: {e} {curr} {err}")
                            log.error(f"error fetching candle: {e} {curr} {err}")
                        success = False
                if success:
                    times_inserted = 0
                    for line in candle:
                        ts = datetime.fromtimestamp(line[0]/1e3) #check here if we have a ms timestamp or not
                        # fixed: the old `for l in line: l = 0` only rebound the loop
                        # variable and never replaced nulls in the row itself
                        line = [0 if l == None else l for l in line]
                        statement = "INSERT INTO {} (timestamp, datetime, pair, open, high, low, close, volume) VALUES ({}, '{}', '{}', {}, {}, {}, {}, {});".format(e, line[0], ts, ticker.replace("/", "-"), line[1], line[2], line[3], line[4], line[5])
                        try:
                            db_n.execute(statement)
                            db_n.commit()
                            times_inserted += 1  # fixed: was `+= len(candle)`, which inflated the logged count quadratically
                        except sqlite3.OperationalError as op:
                            nulls = []
                            c = 0
                            # identify where the null value is
                            for l in line:
                                if l == None:
                                    nulls.append(c)
                                c += 1
                            print(f"exchange: {e} currency: {curr}\nsql statement: {statement}\nerror: {op}(moving on)")
                            log.error(f"exchange: {e} currency: {curr} sql statement: {statement} error: {op}")
                    now = datetime.now()
                    print(f"[{now}] | inserted into {e} {curr} {times_inserted} times")
                    log.info(f"[{now}] | inserted into {e} {curr} {times_inserted} times")
            else:
                # no candle endpoint: store the plain ticker price with zeroed OHLC columns
                try:
                    price = ex_objs[e].fetch_ticker(ticker)
                except Exception as err:
                    print(f"error fetching ticker: {err}")
                    log.error(f"error fetching ticker: {err}")
                    success = False
                if success:
                    ts = None
                    try:
                        if is_ms(int(price['timestamp'])):
                            ts = datetime.fromtimestamp(int(price['timestamp'])/1e3)
                        else:
                            ts = datetime.fromtimestamp(int(price['timestamp']))
                    except OverflowError as oe:
                        print(f"{oe} caused by {ts}")
                    ticker = ticker.replace("/", "-")
                    # fixed: quoting was mangled ('{ts}", "{ticker}'), which is a syntax
                    # error in the f-string and invalid SQL; the duplicate copy of this
                    # function elsewhere in the file already carries the corrected form
                    statement = f"INSERT INTO {e} (timestamp, datetime, pair, open, high, low, close, volume) VALUES ({price['timestamp']}, '{ts}', '{ticker}', 0.0, 0.0, 0.0, {price['last']}, 0.0);"
                    db_n.execute(statement)
                    db_n.commit()
                    now = datetime.now()
                    print(f"[{now}] | inserted into {e} {curr} VALUE: {price['last']}")
                    log.info(f"[{now}] | inserted into {e} {curr} VALUE: {price['last']}")
            time.sleep(interval)

# Thread method. Makes requests every interval seconds.
# Adding this method here to make request more versatile while maintaining the same behavior
def request_periodically(exchanges, interval):
    db_n = sqlite3.connect(p, timeout=30)
    while True:
        request(exchanges,interval,db_n)

# Split the list of exchanges into chunks up to size chunk_size.
# Create a thread for each chunk and start it, then add the thread to a list.
# Return a list of tuples that contain the list of whats in each chunk and a list of the actual thread objects.
def request_fast(exchanges,interval, chunk_size):
    """Split `exchanges` into chunks of up to `chunk_size` and poll each chunk
    on its own thread via request_periodically.

    Returns (chunks, threads): the exchange-name chunks and the started Thread objects.
    """
    count = 0
    chunks = []
    threads = []
    current_chunk = []
    # split up the list of exchanges
    for e in exchanges:
        if count < chunk_size:
            current_chunk.append(e)
            count += 1
        else:
            # fixed: the old code reset the counter and dropped `e`, losing one
            # exchange at every chunk boundary; start the new chunk with it instead
            chunks.append(current_chunk)
            current_chunk = [e]
            count = 1
    # fixed: don't silently discard the final partial chunk
    if current_chunk:
        chunks.append(current_chunk)
    # Start a thread for each chunk
    for chunk in chunks:
        print(f"creating thread for chunk {chunk}")
        log.info(f"creating thread for chunk {chunk}")
        cThread = Thread(target=request_periodically, args=(chunk,interval))
        cThread.start()
        threads.append(cThread)
    return (chunks, threads)

# Fetch the complete historical data for an exchange for a given time interval in milliseconds
# start_date is the oldest date
# end_date is the newest date
def request_history(exchange, currency, start_date, end_date):
    """Walk forward from start_date to end_date pulling 1m candles and
    inserting them into the exchange's table. Dates are ms timestamps."""
    global interval
    db_n = sqlite3.connect(p, timeout=10)
    ticker = f"BTC/{currency}"
    while start_date < end_date:
        #params = {'limit': 10000, 'start': start_date, 'end': int((datetime.fromtimestamp(start_date/1e3) + timedelta(hours=2)).timestamp()*1e3)}
        params = {'start': start_date, 'end': end_date}
        tick = ex_objs[exchange].fetch_ohlcv(symbol=ticker, timeframe='1m', params=params)
        for line in tick:
            dt = None
            symbol = ticker.replace("/", "-")
            try:
                # dict-style rows (ticker shaped); list rows raise TypeError below
                if is_ms(int(line['timestamp'])):
                    dt = datetime.fromtimestamp(line['timestamp'] / 1e3)
                else:
                    dt = datetime.fromtimestamp(line['timestamp'])
                # fixed: quoting was mangled ('{dt}", "{symbol}'), producing a broken
                # f-string and invalid SQL
                statement = f"INSERT INTO {exchange} (timestamp, datetime, pair, open, high, low, close, volume) VALUES ({line['timestamp']}, '{dt}', '{symbol}', 0.0, 0.0, 0.0, {line['last']}, 0.0);"
            except TypeError:
                # ccxt OHLCV rows are plain lists: [ts, open, high, low, close, volume]
                if line[0] % 1000 == 0:
                    dt = datetime.fromtimestamp(line[0] / 1e3)
                else:
                    dt = datetime.fromtimestamp(line[0])
                statement = f"INSERT INTO {exchange} (timestamp, datetime, pair, open, high, low, close, volume) VALUES ({line[0]}, '{dt}', '{symbol}', {line[1]}, {line[2]}, {line[3]}, {line[4]}, {line[5]});"
            db_n.execute(statement)
            db_n.commit()
        l = len(tick)
        print(f"table: {exchange} period: {start_date} to {end_date} rows inserted: {l}")
        log.info(f"table: {exchange} period: {start_date} to {end_date} rows inserted: {l}")
        start_date += 1e4 #leaving this hardcoded for now
        start_date = int(start_date)
        time.sleep(interval)

# Create a thread for each exchange that needs history.
def request_history_periodically(histExchanges, currencies, start_date):
    # NOTE(review): the `currencies` and `start_date` parameters are ignored —
    # every thread pulls USD history back to the global historyEnd. Confirm intent.
    history_threads = []
    for h in histExchanges:
        hThread = Thread(target=request_history, args=(h, "USD", historyEnd, datetime.now().timestamp()*1e3))
        hThread.start()
        history_threads.append(hThread)
        print(f"started thread for {h}")
        log.info(f"started thread for {h}")
    return history_threads

# Read the values stored in the config file and store them in memory.
# Run during install and at every run of the server.
# Returns void
def read_config():
    """Parse key=value lines from configPath into the module-level settings."""
    global exchanges
    global interval
    global performance_mode
    global averaging_time
    global exchange_limit
    global historicalExchanges
    global historyEnd
    global keepWeeks
    global on_demand
    with open(configPath, "r") as f:
        lines = f.readlines()
    #read each line in the file
    for line in lines:
        #split the current line
        setting_line = line.split("=")
        #if there are invalid lines in the file ignore them
        if "#" in setting_line[0]:
            pass #ignore comments
        elif setting_line[0] not in allowedFields and "#" not in setting_line[0]:
            print(f"invalid config setting {setting_line[0]}")
            log.error(f"invalid config setting {setting_line[0]}")
        elif setting_line[0] == "keepWeeks":
            try:
                keepWeeks = int(setting_line[1])
            except Exception as e:
                print(f"could not read keepWeeks field. Using default setting of {keepWeeks} weeks. Error: {e}")
                log.error(f"could not read keepWeeks field. Using default setting of {keepWeeks} weeks. Error: {e}")
        elif setting_line[0] == "exchanges":
            exs = setting_line[1].split(" ")
            for e in exs:
                e = e.replace("\n", "")
                if e == "all":
                    # cache nothing locally beyond what's requested; serve everything ccxt supports
                    exchanges = list(ex_objs.keys())
                    on_demand = True
                    break
                # NOTE(review): this else also fires for supported exchanges that are
                # already in the list, logging a misleading "not supported" message
                if e not in exchanges and is_supported(e) == True:
                    exchanges.append(e)
                else:
                    print(f"{e} is not supported by CCXT!")
                    log.error(f"{e} is not supported by CCXT!")
        elif setting_line[0] == "currencies":
            currs = setting_line[1].split(" ")
            for c in currs:
                #need to make sure currency codes are all caps and have newlines dropped off
                c_formatted = (c.replace("\n", "")).upper()
                if c_formatted not in currencies:
                    if "\n" in c:
                        currencies.append(c_formatted)
                    else:
                        currencies.append(c_formatted)
        elif setting_line[0] == "interval":
            interval = int(setting_line[1])
        elif setting_line[0] == "exchange_limit":
            try:
                exchange_limit = int((setting_line[1].replace("\n", "")))
            except TypeError:
                print("invalid value in exchange_limit field. Must be an integer")
                log.error("invalid value in exchange_limit field. Must be an integer")
        elif setting_line[0] == "averaging_time":
            try:
                averaging_time = int((setting_line[1]).replace("\n", ""))
            except TypeError:
                print("invalid value in averaging_time field. Must be an integer (number of hours)")
                log.error("invalid value in averaging_time field. Must be an integer (number of hours)")
        elif setting_line[0] == "historicalExchanges":
            hists = setting_line[1].split(" ")
            for h in hists:
                h = (h.replace("\n", ""))
                historicalExchanges.append(h)
            print(f"collecting history for {historicalExchanges}")
            log.error(f"collecting history for {historicalExchanges}")
        elif setting_line[0] == "historyEnd":
            try:
                historyEnd = int((setting_line[1]).replace("\n", ""))
            except TypeError:
                print("invalid value in historyEnd. Must be millisecond timestamp (integer)")
                log.error("invalid value in historyEnd. Must be millisecond timestamp (integer)")
        else:
            return
    #print statement for debugging
    len_exchanges = len(exchanges)
    if len_exchanges > exchange_limit:
        print(f"{len_exchanges} exchanges detected. Using performance mode (multithreading)")
        log.info(f"{len_exchanges} exchanges detected. Using performance mode (multithreading)")
        performance_mode = True
    print(f" Settings read:\n keepWeeks: {keepWeeks}\n exchanges: {exchanges}\n currencies: {currencies}\n interval: {interval}\n exchange_limit: {exchange_limit}\n averaging_time: {averaging_time}\n historicalExchanges: {historicalExchanges}\n historyEnd: {historyEnd}")
    log.info(f" Settings read:\n keepWeeks: {keepWeeks}\n exchanges: {exchanges}\n currencies: {currencies}\n interval: {interval}\n exchange_limit: {exchange_limit}\n averaging_time: {averaging_time}\n historicalExchanges: {historicalExchanges}\n historyEnd: {historyEnd}")

# Check for empty tables in the database
def poke_db(exchanges):
    """Return (and store in the global `score`) the percentage of exchange
    tables that currently contain no rows."""
    global score
    db_n = sqlite3.connect(p)
    empties = 0
    for e in exchanges:
        statement = f"SELECT * FROM {e} ORDER BY timestamp DESC LIMIT 1;"
        c = db_n.execute(statement)
        db_n.commit()
        res = c.fetchone()
        if res == None:
            empties += 1  # fixed: the counter was never incremented, so score was always 0
            print(f"{e} table is empty!")
            log.info(f"{e} table is empty!")
    score = (empties / len(exchanges))*100
    print(f"{score}% of tables are empty")
    return score

# Find gaps in an exchanges database back to historyEnd and create a list of those gaps as tuples
def find_gaps(exchange, currency):
    global historyEnd
    db_n = sqlite3.connect(p)
    currency = currency.upper()
    ticker = f"BTC-{currency}"
    # probe one row to determine whether the table stores ms or second timestamps
    statement = f"SELECT timestamp FROM {exchange} LIMIT 1;"
    c = db_n.execute(statement)
    res = c.fetchone()
    if res != None and is_ms(int(res[0])):
        statement = f"SELECT timestamp,datetime FROM {exchange} WHERE pair = '{ticker}' AND timestamp > {historyEnd} ORDER BY timestamp;"
    else:
        statement = f"SELECT timestamp, datetime FROM {exchange} WHERE pair = '{ticker}' AND timestamp > {historyEnd / 1e3} ORDER BY timestamp;"
    c = db_n.execute(statement)
    res = c.fetchall()
    report = {}
    # later in time is higer ids
    i = 0
    key = 0
    stop = len(res)
    #make the time gap a configurable param
    # a "gap" is any jump of more than 1,000,000 between consecutive timestamps
    while i < stop-1:
        if res[i+1][0] > res[i][0]+1000000:
            #report.append((res[i], res[i+1]))
            # store each gap as a "start-end" string so backfill() can split it apart
            report[key] = f"{res[i][0]}-{res[i+1][0]}"
            key +=1
        i += 1
    return report

# Fill gaps in a table via request_history
def backfill(report, exchange, currency):
    # `report` maps keys to "start-end" millisecond-timestamp ranges from find_gaps()
    for key in report:
        print(f"filling gap {key}")
        rang = report[key].split("-")
        start = int(rang[0])
        end = int(rang[1])
        request_history(exchange, currency, start, end)

# This method is called at the first run.
# It sets up the required tables inside of a local sqlite3 database. There is one table for each exchange.
# Tables are only created if they do not already exist. Install will attempt to create tables for every listed exchange at once when called.
def install():
    read_config() #create the sqlite db
    len_exchanges = len(exchanges)
    print(f"creating tables for {len_exchanges} exchanges if they do not exist already.")
    log.info(f"creating tables for {len_exchanges} exchanges if they do not exist already.")
    for exchange in exchanges:
        # one table per exchange; exchange names come from the config / ccxt ids
        sql = f"CREATE TABLE IF NOT EXISTS {exchange} (id INTEGER PRIMARY KEY AUTOINCREMENT, timestamp INTEGER, datetime TEXT, pair TEXT, open REAL, high REAL, low REAL, close REAL, volume REAL)"
        print(f"created table for {exchange}")
        log.info(f"created table for {exchange}")
        db.execute(sql)
        db.commit()
    db.close()

# Remove every entry older than now-keepWeeks from all tables in the database
# if there is nothing to prune then nothing will be pruned.
def prune(keepWeeks):
    # prune checks will run continuously and check every 60k seconds right now.
    db_n = sqlite3.connect(p, timeout=10)
    while True:
        for exchange in exchanges:
            #count = ((db.execute("SELECT Count(*) FROM {}".format(exchange))).fetchone())[0]
            # historical tables are deliberately never pruned
            if exchange not in historicalExchanges:
                check = f"SELECT MAX(timestamp) FROM {exchange};"
                cursor = db_n.execute(check)
                check_ts = cursor.fetchone()
                statement = ""
                if check_ts[0] is not None:
                    try:
                        # pick the cutoff unit (ms vs s) to match the table's precision
                        if is_ms(int(check_ts[0])):
                            cutoff = (datetime.now()-timedelta(weeks=keepWeeks)).timestamp()*1000
                            statement = f"DELETE FROM {exchange} WHERE timestamp < {cutoff};"
                        else:
                            cutoff = (datetime.now()-timedelta(weeks=keepWeeks)).timestamp()
                            statement = f"DELETE FROM {exchange} WHERE timestamp < {cutoff};"
                        # retry forever on "database is locked" errors
                        while True:
                            try:
                                db_n.execute(statement)
                                break
                            except sqlite3.OperationalError as op:
                                log.error(f"{op}. Trying again in one hour...")
                                print(f"{op}. Trying again in one hour...")
                                time.sleep(3600)
                        db_n.commit()
                    except TypeError as te:
                        log.error(f"too early to prune {te}")
        time.sleep(60000)

if __name__ == "__main__":
    # server startup: create tables, then spawn collector / history / pruning
    # threads before handing the main thread to Flask
    install() #install will call read_config
    chunk_size = optimize_chunks(cpuOffset=0)
    threadResults = None
    # spin up many threads if there is a lot of exchanges present in the config file
    if performance_mode:
        # request_fast will create and start the threads automatically
        print("performance mode is ON")
        log.info("performance mode is ON")
        threadResults = request_fast(exchanges, interval, chunk_size)
    else:
        print("performance mode is OFF")
        log.info("performance mode is OFF")
        prices_thread = Thread(target=request_periodically, args=(exchanges,interval))
        prices_thread.start()
    request_history_periodically(historicalExchanges, currencies, historyEnd)
    pruning_thread = Thread(target=prune, args=[keepWeeks])
    pruning_thread.start()
    app.run()
    db.close()
# spotbit: a self-hosted BTC price server that caches exchange data from ccxt
# into a local sqlite database and serves it over a small Flask API.
from flask import Flask, request as flaskRequest, jsonify, render_template
import requests
import json
import time
from datetime import datetime, timedelta
import sqlite3
import sys
import ccxt
import os
from threading import Thread
from pathlib import Path
import logging

#setup the logging module for file output
log = logging.getLogger('spotbit')
log.setLevel(logging.DEBUG)
logFileHandler = logging.FileHandler('/home/spotbit/.spotbit/spotbit.log')
logFileHandler.setLevel(logging.DEBUG)
log.addHandler(logFileHandler)

#Config Settings
# keys that read_config() will accept from the config file
allowedFields = ["keepWeeks", "exchanges", "currencies", "interval", "exchange_limit", "averaging_time", "historicalExchanges", "historyEnd"]
configPath = Path("/home/spotbit/.spotbit/spotbit.config")

#Default values; these will be overwritten when the config file is read
exchanges = []
historicalExchanges = [] # exchanges that we want the history of
currencies = []
interval = 10 #time to wait between GET requests to servers, to avoid ratelimits
keepWeeks = 3 # add this to the config file
exchange_limit = 200 #when there are more exchanges than this multithreading is ideal
performance_mode = False
averaging_time = 1 # the number of hours that we should average information over
historyEnd = 0
on_demand = False # whether or not we are caching data
score = 0 #the current percent of empty tables
#the information regarding the current thread
threadResults = None

# curated exchange lists for creating averages
curated_exchanges = {'USD': ['coinbasepro', 'okcoin', 'bitfinex', 'kraken', 'bitstamp'], 'GBP': ['coinbasepro', 'coinsbank', 'bitstamp', 'kraken', 'cexio'], 'EUR': ['kraken', 'coinbasepro', 'bitstamp', 'bitfinex', 'indoex'], 'JPY': ['bitflyer', 'liquid', 'coincheck', 'bitbank', 'zaif'], 'USDT': ['binance', 'okex', 'huobipro', 'bitmax', 'gateio']}
curated_exchanges_list = ['gemini', 'bitstamp', 'okcoin', 'coinsbit', 'coinbasepro', 'coinsbank', 'kraken', 'cexio', 'bitfinex', 'indoex', 'bitflyer', 'liquid', 'coincheck', 'bitbank', 'zaif', 'hitbtc', 'binance', 'okex', 'gateio', 'bitmax']
curated_currencies = ['USD', 'GBP', 'EUR', 'JPY', 'AUD', 'USDT']

# module-level database handle; install() uses it and closes it at startup
p = Path("/home/spotbit/.spotbit/sb.db")
db = sqlite3.connect(p)
print(f"db opened in {p}")
log.debug(f"db opened in {p}")
ONION = ""
try:
    ONION = os.environ["ONION"] #get this value from the path
    print(f"spotbit is running at {ONION}")
except Exception as e:
    print(f"cant find ONION in PATH {e}")

# Database configuration
# We need to have the database opened manually once so that systemd can access it
def configure_db():
    # NOTE(review): these assignments create function-local `p`/`db` and do not
    # touch the module-level globals of the same names — confirm this is intended.
    p = Path("/home/spotbit/.spotbit/sb.db")
    db = sqlite3.connect(p)
    print(f"db opened in {p}")
    log.debug(f"db opened in {p}")

app = Flask(__name__)

# split up the number of exchanges per chunk based on how many cpu cores are available
# cpuOffset: the number of cores you want to try and utilize.
def optimize_chunks(cpuOffset):
    return int(len(exchanges) / (os.cpu_count()-cpuOffset))

# Create a dict that contains ccxt objects for every supported exchange.
# The API will query a subset of these exchanges based on what the user has specified
# Unsupported exchanges: bitvaro phemex vaultoro
# Future Plans:
# Hard coding supported exchanges is a bad practice. CCXT autogenerates code for each exchange and therefore at least in theory may frequently support new exchanges.
# Need to find a way to automatically create a list of exchange objects.
# btctradeim doesn't want to work on raspberry pi
def init_supported_exchanges():
    """Instantiate one ccxt client per hard-coded supported exchange id."""
    objects = {"acx":ccxt.acx(), "aofex":ccxt.aofex(), "bequant":ccxt.bequant(), "bibox":ccxt.bibox(),
               "bigone":ccxt.bigone(), "binance":ccxt.binance(), "bitbank":ccxt.bitbank(), "bitbay":ccxt.bitbay(),
               "bitfinex":ccxt.bitfinex(), "bitflyer":ccxt.bitflyer(), "bitforex":ccxt.bitforex(), "bithumb":ccxt.bithumb(),
               "bitkk":ccxt.bitkk(), "bitmax":ccxt.bitmax(), "bitstamp":ccxt.bitstamp(), "bittrex":ccxt.bittrex(),
               "bitz":ccxt.bitz(), "bl3p":ccxt.bl3p(), "bleutrade":ccxt.bleutrade(), "braziliex":ccxt.braziliex(),
               "btcalpha":ccxt.btcalpha(), "btcbox":ccxt.btcbox(), "btcmarkets":ccxt.btcmarkets(), "btctradeua":ccxt.btctradeua(),
               "bw":ccxt.bw(), "bybit":ccxt.bybit(), "bytetrade":ccxt.bytetrade(), "cex":ccxt.cex(),
               "chilebit":ccxt.chilebit(), "coinbase":ccxt.coinbase(), "coinbasepro":ccxt.coinbasepro(), "coincheck":ccxt.coincheck(),
               "coinegg":ccxt.coinegg(), "coinex":ccxt.coinex(), "coinfalcon":ccxt.coinfalcon(), "coinfloor":ccxt.coinfloor(),
               "coinmate":ccxt.coinmate(), "coinone":ccxt.coinone(), "crex24":ccxt.crex24(), "currencycom":ccxt.currencycom(),
               "digifinex":ccxt.digifinex(), "dsx":ccxt.dsx(), "eterbase":ccxt.eterbase(), "exmo":ccxt.exmo(),
               "exx":ccxt.exx(), "foxbit":ccxt.foxbit(), "ftx":ccxt.ftx(), "gateio":ccxt.gateio(),
               "gemini":ccxt.gemini(), "hbtc":ccxt.hbtc(), "hitbtc":ccxt.hitbtc(), "hollaex":ccxt.hollaex(),
               "huobipro":ccxt.huobipro(), "ice3x":ccxt.ice3x(), "independentreserve":ccxt.independentreserve(), "indodax":ccxt.indodax(),
               "itbit":ccxt.itbit(), "kraken":ccxt.kraken(), "kucoin":ccxt.kucoin(), "lakebtc":ccxt.lakebtc(),
               "latoken":ccxt.latoken(), "lbank":ccxt.lbank(), "liquid":ccxt.liquid(), "luno":ccxt.luno(),
               "lykke":ccxt.lykke(), "mercado":ccxt.mercado(), "oceanex":ccxt.oceanex(), "okcoin":ccxt.okcoin(),
               "okex":ccxt.okex(), "paymium":ccxt.paymium(), "poloniex":ccxt.poloniex(), "probit":ccxt.probit(),
               "southxchange":ccxt.southxchange(), "stex":ccxt.stex(), "surbitcoin":ccxt.surbitcoin(), "therock":ccxt.therock(),
               "tidebit":ccxt.tidebit(), "tidex":ccxt.tidex(), "upbit":ccxt.upbit(), "vbtc":ccxt.vbtc(),
               "wavesexchange":ccxt.wavesexchange(), "whitebit":ccxt.whitebit(), "yobit":ccxt.yobit(), "zaif":ccxt.zaif(),
               "zb":ccxt.zb()}
    return objects

# Check if a given exchange is in the list of supported exchanges.
# Currently, the list of supported exchanges is all those supported by ccxt aside from a small handful that did not seem to work properly. May be bug in ccxt or just a typo in their code / docs
def is_supported(exchange):
    try:
        obj = ex_objs[exchange]
        if obj != None:
            return True
        else:
            return False
    except Exception as e:
        print(f"caught an error: {e}")
        log.error(f"caught an error {e}")
        return False

# Check if a timestamp has ms precision by modding by 1000
def is_ms(timestamp):
    if timestamp % 1000 == 0:
        return True
    return False

# We create a list of all exchanges to do error checking on user input
ex_objs = init_supported_exchanges()
num_exchanges = len(ex_objs)
print(f"created list of {num_exchanges}")
log.info(f"created list of {num_exchanges}")

@app.route('/')
def index():
    """Landing page with example fetch URLs for the API."""
    date_start = (datetime.now() - timedelta(days=5)).timestamp()*1e3
    date_end = (datetime.now()).timestamp()*1e3
    f0 = f"{ONION}/now/USD/coinbasepro"
    f1 = f"{ONION}/now/USD"
    f2 = f"{ONION}/hist/USD/coinbasepro/{date_start}/{date_end}"
    f3 = f"{ONION}/configure"
    return render_template('index.html', fetch_0=f0,fetch_1=f1,fetch_2=f2,fetch_3=f3,date_start=date_start,date_end=date_end)

# TODO: create an html page to render here
@app.route('/status')
def status():
    """Report thread status in performance mode, otherwise a liveness string."""
    global score
    global threadResults
    if performance_mode:
        # threadResults is the (chunks, threads) pair returned by request_fast
        # fixed: `html` was used before assignment (NameError) and its text was
        # never part of the response; accumulate into `content` and pair each
        # chunk with its thread via zip instead of unpacking the outer tuple
        chunks, threads = threadResults
        l = len(threads)
        content = f"Threads: {l}"
        for chunk, thread in zip(chunks, threads):
            content += f"{chunk} at memory address: {thread}"
        return f"<html><p>{content}</p></html>"
    else:
        return "server is running"

# configure the settings of Spotbit while the server is still running
# send a GET request to this route to view current settings
# send a POST request to this route with settings fields stored in JSON to update settings
# TODO: make the updates persistant by also writing them to file.
@app.route('/configure', methods=['GET', 'POST'])
def configure():
    # seems like this needs to be done in order to reference global vars inside of the flask server thread
    global keepWeeks
    global currencies
    global exchanges
    global interval
    global on_demand
    if flaskRequest.method == 'POST':
        #return the config settings TODO: error check so that the user doesn't have to submit everything at once. Also implement a form here.
        # fixed: request.json is a dict, not a callable — subscript it
        keepWeeks = flaskRequest.json["keepWeeks"]
        exchanges = flaskRequest.json["exchanges"]
        currencies = flaskRequest.json["currencies"]
        interval = flaskRequest.json["interval"]
        return {'updated settings?':'yes', 'keepWeeks':keepWeeks, 'currencies':currencies, 'exchanges':exchanges, 'interval':interval}
    else:
        return {'updated settings?':'no', 'keepWeeks':keepWeeks, 'currencies':currencies, 'on demand exchanges':list(ex_objs.keys()), 'cached exchanges': exchanges, 'interval':interval}

# return averages in a list of tuples
# find the oldest timestamp in the list of tuples
def average_price_value(tuple_list, tuple_length, ticker):
    """Average the OHLCV columns (indices 4..tuple_length-1) of database rows.

    NOTE(review): raises ZeroDivisionError when tuple_list is empty — callers
    are expected to pass at least one row; confirm against now_noex.
    """
    running_sums = [0] * tuple_length
    oldest_timestamp = 1e13
    for tup in tuple_list:
        if tup != None and tup[1] < oldest_timestamp:
            oldest_timestamp = tup[1]
        for i in range(0,tuple_length):
            if i > 3:
                running_sums[i] += tup[i]
    list_len = len(tuple_list)
    return {'id': 'average_value', 'timestamp': (datetime.now()).timestamp()*1e3, 'datetime': datetime.now(), 'oldest_timestamp': oldest_timestamp, 'currency_pair': ticker, 'open': running_sums[4]/list_len, 'high': running_sums[5]/list_len, 'low': running_sums[6]/list_len, 'close': running_sums[7]/list_len, 'volume': running_sums[8]/list_len}

# route for when a call is made without specifying an exchange.
# return an average of the 5 curated exchanges for that currency @app.route('/now/<currency>') def now_noex(currency): global averaging_time db_n = sqlite3.connect(p, timeout=30) currency = currency.upper() ticker = f"BTC-{currency}" # only calculate averages if a list has been curated already if currency in curated_currencies: components = curated_exchanges[currency] failed_exchanges = [] components_list = [] for exchange in components: ms_check = f"SELECT timestamp FROM {exchange} LIMIT 1;" cursor = db_n.execute(ms_check) res = cursor.fetchone() # only take values from within 15 min of present ts_delta = (datetime.now() - timedelta(hours=averaging_time)).timestamp() if res!= None and is_ms(int(res[0])): ts_delta *= 1e3 statement = f"SELECT * FROM {exchange} WHERE pair = '{ticker}' AND timestamp > {ts_delta} ORDER BY timestamp DESC LIMIT 1;" cursor = db_n.execute(statement) res = cursor.fetchone() if res != None: components_list.append(res) else: # if there is no data in the table yet, then try a direct request. res = fallback_to_direct(exchange, currency, db_n) if len(res) < 2: log.error(f"could not get data from {exchange}") failed_exchanges.append(exchange) else: components_list.append(res) result = average_price_value(components_list, 9, ticker) result['exchanges'] = components result['failed_exchanges'] = failed_exchanges return result # Get the latest price entry in the database. # Currency: the three letter base currency desired. Must be a currency you are already collecting data for # Exchange: the exchange to query data for from the local database. 
@app.route('/now/<currency>/<exchange>')
def now(currency, exchange):
    """Return the latest cached row for BTC-<currency> on <exchange>.

    `exchange` may also be "all" (average the latest close across all cached
    exchanges) or an uncached-but-supported exchange (direct ccxt request).
    """
    db_n = sqlite3.connect(p, timeout=30)
    ticker = "BTC-{}".format(currency.upper())
    if exchange in exchanges: #if the exchange is already in the config file
        #statement = "SELECT * FROM {} WHERE pair = '{}' AND timestamp = (SELECT MAX(timestamp) FROM {});".format(exchange, ticker, exchange)
        statement = f"SELECT * FROM {exchange} WHERE pair = '{ticker}' ORDER BY timestamp DESC LIMIT 1;"
        try:
            cursor = db_n.execute(statement)
            res = cursor.fetchone()
        except sqlite3.OperationalError:
            print("database is locked. Cannot access it")
            log.error("database is locked. Cannot access it")
            return {'err': 'database locked'}
        if res != None:
            db_n.close()
            return {'id':res[0], 'timestamp':res[1], 'datetime':res[2], 'currency_pair':res[3], 'open':res[4], 'high':res[5], 'low':res[6], 'close':res[7], 'vol':res[8]}
        else:
            db_n.close()
            return fallback_to_direct(exchange, currency, db_n)
    elif exchange == "all": #if all is selected then we select from all exchanges and average the latest close
        result_set = []
        for e in exchanges:
            ts_cutoff = (datetime.now() - timedelta(hours=averaging_time)).timestamp()
            check_ms = f"SELECT timestamp FROM {e} LIMIT 1;"
            cursor = db_n.execute(check_ms)
            db_n.commit()
            ts = cursor.fetchone()
            if ts != None and is_ms(int(ts[0])):
                print(f"using millisecond precision for {e}")
                logging.info(f"using millisecond precision for {e}")
                ts_cutoff *= 1e3
            statement = f"SELECT timestamp, close FROM {e} WHERE timestamp > {ts_cutoff} AND pair = '{ticker}' ORDER BY timestamp LIMIT 1;"
            cursor = db_n.execute(statement)
            db_n.commit()
            result = cursor.fetchone()
            if result != None:
                result_set.append(result[1])
        return {'ticker': list_mean(result_set)}
    else:
        return fallback_to_direct(exchange, currency, db_n)

# This method will directly request an exchange that is supported but who's table is also empty
def fallback_to_direct(exchange, currency, db_n):
    #make a direct request
    ticker = "BTC-{}".format(currency.upper())
    res = request_single(exchange, currency)
    #db_n.close()
    if res != None:
        return res
    else:
        return {'id': res}

# Find the mean of a list of numbers (latest close prices)
def list_mean(input_list):
    # fixed: guard the empty case so an exchange set with no recent rows
    # returns None instead of raising ZeroDivisionError
    if not input_list:
        return None
    avg = 0.0
    for l in input_list:
        avg += l
    return avg/len(input_list)

# Get data from local storage inside of a certain range.
# Parameters:
#   Currency: the fiat base currency to fetch data for. Should be a three letter currency code in lowercase.
#   Exchange: the exchange to get data from.
#   date_start and date_end: date_start is the oldest time value in the range desired. It can be provided as a millisecond timestamp or as a datetime formatted as "YYYY-MM-DDTHH:mm:SS".
@app.route('/hist/<currency>/<exchange>/<date_start>/<date_end>', methods=['GET'])
def hist(currency, exchange, date_start, date_end):
    db_n = sqlite3.connect(p, timeout=10)
    ticker = "BTC-{}".format(currency.upper())
    #check what format of dates we have
    if (str(date_start)).isdigit():
        date_s = int(date_start)
        date_e = int(date_end)
    else:
        #error checking for malformed dates
        try:
            date_s = (datetime.fromisoformat(date_start.replace("T", " "))).timestamp()*1000
            date_e = (datetime.fromisoformat(date_end.replace("T", " "))).timestamp()*1000
        except Exception:
            return "malformed dates. Provide both dates in the same format: use YYYY-MM-DDTHH:mm:SS or millisecond timestamps"
    # check the table we want to select from to see the precision of it
    check = f"SELECT timestamp FROM {exchange} ORDER BY timestamp DESC LIMIT 1;"
    cursor = db_n.execute(check)
    statement = ""
    ts = cursor.fetchone()
    if ts != None and is_ms(int(ts[0])):
        statement = f"SELECT * FROM {exchange} WHERE timestamp > {date_s} AND timestamp < {date_e} AND pair = '{ticker}';"
    else:
        # for some exchanges we cannot use ms precision timestamps (such as coinbase)
        date_s /= 1e3
        date_e /= 1e3
        statement = f"SELECT * FROM {exchange} WHERE timestamp > {date_s} AND timestamp < {date_e} AND pair = '{ticker}';"
    # keep trying in case of database locked error
    while True:
        try:
            cursor = db_n.execute(statement)
            break
        except sqlite3.OperationalError as oe:
            time.sleep(5)
    res = cursor.fetchall()
    db_n.close()
    return {'columns': ['id', 'timestamp', 'datetime', 'currency_pair', 'open', 'high', 'low', 'close', 'vol'], 'data':res}

# Return all database rows within `tolerance` for each of the supplied dates
# Dates should be provided as millisecond timestamps separated by hyphens
@app.route('/hist/<currency>/<exchange>/<dates>')
def hist_single_dates(currency, exchange, dates):
    db_n = sqlite3.connect(p, timeout=10)
    ticker = "BTC-{}".format(currency.upper())
    dates_list = dates.split("-")
    # the number of minutes away from a given date that is considered acceptable
    tolerance = 30
    results = {}
    check_ms = f"SELECT timestamp FROM {exchange} LIMIT 1;"
    cursor = db_n.execute(check_ms)
    ts = cursor.fetchone()
    ms_precision = True
    if ts != None and is_ms(int(ts[0])) != True:
        ms_precision = False
    for d in dates_list:
        try:
            ts = int(d)
        except Exception:
            return f"malformed date {d}"
        dt = datetime.fromtimestamp(ts/1e3)
        lower_bound = (dt - timedelta(minutes=tolerance)).timestamp()*1e3
        upper_bound = (dt + timedelta(minutes=tolerance)).timestamp()*1e3
        if ms_precision == False:
            ts /= 1e3
        # fixed: upper bound must use '<' so we select inside [lower_bound, upper_bound]
        statement = f"SELECT * FROM {exchange} WHERE pair = '{ticker}' AND timestamp > {lower_bound} AND timestamp < {upper_bound} ORDER BY timestamp ASC;"
        while True:
            try:
                cursor = db_n.execute(statement)
                res = cursor.fetchall()[0]
                break
            except sqlite3.OperationalError:
                time.sleep(2)
        if res != None:
            results[f"{d}"] = {'id':res[0], 'timestamp':res[1], 'datetime':res[2], 'pair':res[3], 'open':res[4], 'high':res[5], 'low':res[6], 'close':res[7], 'vol':res[8]}
        else:
            results[f"{d}"] = None
    return results

# Make a single request, without having to loop through all exchanges and currency pairs.
# This is intended for when the user requests an exchange in /now that is not present in the database.
# It will probably not be used for /hist because of the length of time getting arbitrary amounts of historical data can be
def request_single(exchange, currency):
    """Fetch the latest BTC/<currency> data straight from one exchange via ccxt."""
    if not is_supported(exchange):
        return f"{exchange} is not supported by CCXT"
    obj = ex_objs[exchange]
    ticker = "BTC/{}".format(currency.upper())
    dt = None
    if obj.has['fetchOHLCV']:
        tframe = '1m'
        # drop all this in a separate method
        lim = 1000
        if exchange == "bleutrade" or exchange == "btcalpha" or exchange == "rightbtc" or exchange == "hollaex":
            tframe = '1h'
        if exchange == "poloniex":
            tframe = '5m'
        # some exchanges have explicit limits on how many candles you can get at once
        if exchange == "bitstamp":
            lim = 1000
        if exchange == "bybit":
            lim = 200
        if exchange == "eterbase":
            lim = 1000000
        if exchange == "exmo":
            lim = 3000
        if exchange == "btcalpha":
            lim = 720
        result = None
        if exchange == "bitfinex":
            #other exchanges requiring special conditions: bitstamp, bitmart
            params = {'limit':100, 'start':(round((datetime.now()-timedelta(hours=1)).timestamp()*1000)), 'end':round(datetime.now().timestamp()*1000)}
            try:
                result = ex_objs[exchange].fetch_ohlcv(symbol=ticker, timeframe=tframe, since=None, params=params)
            except Exception as e:
                print(f"got an error requesting info from {exchange}: {e}")
                logging.error(f"got an error requesting info from {exchange}: {e}")  # fixed typo in message: "frm" -> "from"
        else:
            try:
                result = obj.fetch_ohlcv(symbol=ticker, timeframe=tframe, since=None, limit=lim)
            except Exception as e:
                print(f"got an error requesting info from {exchange}: {e}")
                logging.error(f"got an error requesting info from {exchange}: {e}")
    else:
        try:
            result = obj.fetch_ticker(ticker)
            if result != None and is_ms(result['timestamp']) == False:
                dt = datetime.fromtimestamp(result['timestamp'])
            else:
                dt = datetime.fromtimestamp(result['timestamp'] / 1e3)
            if result != None:
                return {'close': result['close'], 'symbol': ticker, 'timestamp': result['timestamp'], 'datetime': dt, 'volume': result['bidVolume'], 'id': 'on_demand'}
        except Exception as e:
            print(f"got ratelimited on {e}")
            logging.error(f"got ratelimited on {e}")
    if result != None:
        res = result[-1]
        if is_ms(res[0]):
            dt = datetime.fromtimestamp(res[0]/1e3)
        else:
            dt = datetime.fromtimestamp(res[0])
        return {'id': 'on_demand', 'timestamp': res[0], 'datetime': dt, 'currency_pair': ticker, 'open': res[1], 'high': res[2], 'low': res[3], 'close': res[4], 'vol': res[5]}
    else:
        return "no data"

# Make an HTTP GET request to exchanges via the ccxt API
# TODO: add error checking for if an exchange supports ohlc data. If not, default to regular price data. (done)
# Loop through all chosen exchanges, check if they are supported, loop through all chosen currencies, for each make request to ohlc endpoint if supported, else price ticker. Write data to local storage.
# Bitfinex special rule: bitfinex returns candles from the beginning of time, not the most recent. This is a behavior of the API itself and has nothing to do with this code or ccxt. Therefore we must specify the timeframe desired in the optional params field of the function call with a dictionary of available options.
def request(exchanges, interval, db_n):
    """Poll every configured exchange/currency pair once and persist the results.

    For exchanges advertising ``fetchOHLCV``, candles are inserted row by row
    into the per-exchange table; otherwise a plain ticker price is stored with
    zeroed open/high/low/volume columns.  Sleeps ``interval`` seconds at the
    end so callers can loop over this function directly.

    FIXES vs. original:
      * NULL scrubbing: the old ``for l in line: if l == None: l = 0`` only
        rebound the loop variable and never changed ``line``, so NULLs still
        reached SQL and triggered OperationalError.
      * ``times_inserted`` was incremented by ``len(candle)`` per row,
        overcounting by a factor of len(candle); now counts one per insert.
      * VALUES are bound with ``?`` placeholders instead of string formatting
        (the table name cannot be a parameter in sqlite, so it stays an
        f-string built from the configured exchange id).
    """
    global currencies
    for e in exchanges:
        for curr in currencies:
            ticker = "BTC/{}".format(curr)
            success = True
            if ex_objs[e].has['fetchOHLCV']:
                candle = None
                tframe = '1m'
                lim = 1000
                # Per-exchange timeframe quirks.
                if e == "bleutrade" or e == "btcalpha" or e == "rightbtc" or e == "hollaex":
                    tframe = '1h'
                if e == "poloniex":
                    tframe = '5m'
                # Some exchanges have explicit limits on how many candles you can get at once.
                if e == "bitstamp":
                    lim = 1000
                if e == "bybit":
                    lim = 200
                if e == "eterbase":
                    lim = 1000000
                if e == "exmo":
                    lim = 3000
                if e == "btcalpha":
                    lim = 720
                if e == "bitfinex":
                    # Bitfinex returns candles from the beginning of time unless an
                    # explicit window is requested, so ask for the last hour only.
                    params = {
                        'limit': 100,
                        'start': round((datetime.now() - timedelta(hours=1)).timestamp() * 1000),
                        'end': round(datetime.now().timestamp() * 1000),
                    }
                    try:
                        candle = ex_objs[e].fetch_ohlcv(symbol=ticker, timeframe=tframe, since=None, params=params)
                        if candle is None:
                            raise Exception(f"candle from {e} is null")
                    except Exception as err:
                        # Gracefully skip this pair; waiting for the next cycle is good enough.
                        if "does not have" not in str(err):
                            print(f"error fetching candle: {e} {curr} {err}")
                            log.error(f"error fetching candle: {e} {curr} {err}")
                        success = False
                else:
                    try:
                        candle = ex_objs[e].fetch_ohlcv(symbol=ticker, timeframe=tframe, since=None, limit=lim)
                        if candle is None:
                            raise Exception(f"candle from {e} is null")
                    except Exception as err:
                        if "does not have" not in str(err):
                            print(f"error fetching candle: {e} {curr} {err}")
                            log.error(f"error fetching candle: {e} {curr} {err}")
                        success = False
                if success:
                    times_inserted = 0
                    for line in candle:
                        # TODO: check whether line[0] is a ms timestamp or not before dividing.
                        ts = datetime.fromtimestamp(line[0] / 1e3)
                        # Replace NULL values that slip into the exchange data with 0.
                        line = [0 if value is None else value for value in line]
                        statement = (
                            f"INSERT INTO {e} (timestamp, datetime, pair, open, high, low, close, volume) "
                            "VALUES (?, ?, ?, ?, ?, ?, ?, ?);"
                        )
                        values = (line[0], str(ts), ticker.replace("/", "-"),
                                  line[1], line[2], line[3], line[4], line[5])
                        try:
                            db_n.execute(statement, values)
                            db_n.commit()
                            times_inserted += 1
                        except sqlite3.OperationalError as op:
                            print(f"exchange: {e} currency: {curr}\nsql statement: {statement}\nerror: {op}(moving on)")
                            log.error(f"exchange: {e} currency: {curr} sql statement: {statement} error: {op}")
                    now = datetime.now()
                    print(f"[{now}] | inserted into {e} {curr} {times_inserted} times")
                    log.info(f"[{now}] | inserted into {e} {curr} {times_inserted} times")
            else:
                # Exchange has no OHLCV endpoint: fall back to the plain price ticker.
                try:
                    price = ex_objs[e].fetch_ticker(ticker)
                except Exception as err:
                    print(f"error fetching ticker: {err}")
                    log.error(f"error fetching ticker: {err}")
                    success = False
                if success:
                    ts = None
                    try:
                        if is_ms(int(price['timestamp'])):
                            ts = datetime.fromtimestamp(int(price['timestamp']) / 1e3)
                        else:
                            ts = datetime.fromtimestamp(int(price['timestamp']))
                    except OverflowError as oe:
                        print(f"{oe} caused by {ts}")
                    ticker = ticker.replace("/", "-")
                    statement = (
                        f"INSERT INTO {e} (timestamp, datetime, pair, open, high, low, close, volume) "
                        "VALUES (?, ?, ?, 0.0, 0.0, 0.0, ?, 0.0);"
                    )
                    db_n.execute(statement, (price['timestamp'], str(ts), ticker, price['last']))
                    db_n.commit()
                    now = datetime.now()
                    print(f"[{now}] | inserted into {e} {curr} VALUE: {price['last']}")
                    log.info(f"[{now}] | inserted into {e} {curr} VALUE: {price['last']}")
    time.sleep(interval)


# Thread method. Makes requests every interval seconds.
# Adding this method here to make request more versatile while maintaining the same behavior
def request_periodically(exchanges, interval):
    """Open a per-thread sqlite connection and poll `request` forever."""
    db_n = sqlite3.connect(p, timeout=30)
    while True:
        request(exchanges, interval, db_n)


# Split the list of exchanges into chunks up to size chunk_size.
# Create a thread for each chunk and start it, then add the thread to a list.
# Return a list of tuples that contain the list of whats in each chunk and a list of the actual thread objects.
def request_fast(exchanges, interval, chunk_size):
    """Split `exchanges` into chunks of up to `chunk_size` and start one
    polling thread (running `request_periodically`) per chunk.

    Returns a tuple ``(chunks, threads)``.

    FIX: the original counter-based chunking dropped one exchange every time
    a chunk filled up (the triggering element was appended to no chunk) and
    silently discarded the final partial chunk.
    """
    chunks = []
    current_chunk = []
    for e in exchanges:
        current_chunk.append(e)
        if len(current_chunk) >= chunk_size:
            chunks.append(current_chunk)
            current_chunk = []
    if current_chunk:  # keep the trailing partial chunk
        chunks.append(current_chunk)

    threads = []
    for chunk in chunks:
        print(f"creating thread for chunk {chunk}")
        log.info(f"creating thread for chunk {chunk}")
        cThread = Thread(target=request_periodically, args=(chunk, interval))
        cThread.start()
        threads.append(cThread)
    return (chunks, threads)


# Fetch the complete historical data for an exchange for a given time interval in milliseconds
# start_date is the oldest date
# end_date is the newest date
def request_history(exchange, currency, start_date, end_date):
    """Walk forward from start_date to end_date inserting historical rows."""
    global interval
    db_n = sqlite3.connect(p, timeout=10)
    ticker = f"BTC/{currency}"
    while start_date < end_date:
        params = {'start': start_date, 'end': end_date}
        tick = ex_objs[exchange].fetch_ohlcv(symbol=ticker, timeframe='1m', params=params)
        for line in tick:
            dt = None
            symbol = ticker.replace("/", "-")
            try:
                # dict-style payload (plain ticker response)
                if is_ms(int(line['timestamp'])):
                    dt = datetime.fromtimestamp(line['timestamp'] / 1e3)
                else:
                    dt = datetime.fromtimestamp(line['timestamp'])
                statement = f"INSERT INTO {exchange} (timestamp, datetime, pair, open, high, low, close, volume) VALUES ({line['timestamp']}, '{dt}', '{symbol}', 0.0, 0.0, 0.0, {line['last']}, 0.0);"
            except TypeError:
                # list-style payload (OHLCV candle)
                if line[0] % 1000 == 0:
                    dt = datetime.fromtimestamp(line[0] / 1e3)
                else:
                    dt = datetime.fromtimestamp(line[0])
                statement = f"INSERT INTO {exchange} (timestamp, datetime, pair, open, high, low, close, volume) VALUES ({line[0]}, '{dt}', '{symbol}', {line[1]}, {line[2]}, {line[3]}, {line[4]}, {line[5]});"
            db_n.execute(statement)
            db_n.commit()
        l = len(tick)
        print(f"table: {exchange} period: {start_date} to {end_date} rows inserted: {l}")
        log.info(f"table: {exchange} period: {start_date} to {end_date} rows inserted: {l}")
        start_date += 1e4  # leaving this hardcoded for now
        start_date = int(start_date)
        time.sleep(interval)


# Create a thread for each exchange that needs history.
def request_history_periodically(histExchanges, currencies, start_date):
    """Start one request_history thread per historical exchange.

    NOTE(review): the `currencies` and `start_date` parameters are ignored —
    history is collected for "USD" from the module-level `historyEnd`.
    Preserved as-is; confirm intent before changing.
    """
    history_threads = []
    for h in histExchanges:
        hThread = Thread(target=request_history, args=(h, "USD", historyEnd, datetime.now().timestamp() * 1e3))
        hThread.start()
        history_threads.append(hThread)
        print(f"started thread for {h}")
        log.info(f"started thread for {h}")
    return history_threads


# Read the values stored in the config file and store them in memory.
# Run during install and at every run of the server.
# Returns void
def read_config():
    global exchanges
    global interval
    global performance_mode
    global averaging_time
    global exchange_limit
    global historicalExchanges
    global historyEnd
    global keepWeeks
    global on_demand
    with open(configPath, "r") as f:
        lines = f.readlines()
    # read each line in the file
    for line in lines:
        # split the current line
        setting_line = line.split("=")
        # if there are invalid lines in the file ignore them
        if "#" in setting_line[0]:
            pass  # ignore comments
        elif setting_line[0] not in allowedFields and "#" not in setting_line[0]:
            print(f"invalid config setting {setting_line[0]}")
            log.error(f"invalid config setting {setting_line[0]}")
        elif setting_line[0] == "keepWeeks":
            try:
                keepWeeks = int(setting_line[1])
            except Exception as e:
                print(f"could not read keepWeeks field. Using default setting of {keepWeeks} weeks. Error: {e}")
                log.error(f"could not read keepWeeks field. Using default setting of {keepWeeks} weeks. Error: {e}")
        elif setting_line[0] == "exchanges":
            exs = setting_line[1].split(" ")
            for e in exs:
                e = e.replace("\n", "")
                if e == "all":
                    exchanges = list(ex_objs.keys())
                    on_demand = True
                    break
                if e not in exchanges and is_supported(e) == True:
                    exchanges.append(e)
                else:
                    print(f"{e} is not supported by CCXT!")
                    log.error(f"{e} is not supported by CCXT!")
        elif setting_line[0] == "currencies":
            currs = setting_line[1].split(" ")
            for c in currs:
                # currency codes must be all caps with newlines dropped off
                c_formatted = (c.replace("\n", "")).upper()
                if c_formatted not in currencies:
                    # (original had a redundant "\n" check appending the same
                    # value in both branches — collapsed, behavior unchanged)
                    currencies.append(c_formatted)
        elif setting_line[0] == "interval":
            interval = int(setting_line[1])
        elif setting_line[0] == "exchange_limit":
            try:
                exchange_limit = int((setting_line[1].replace("\n", "")))
            except TypeError:
                print("invalid value in exchange_limit field. Must be an integer")
                log.error("invalid value in exchange_limit field. Must be an integer")
        elif setting_line[0] == "averaging_time":
            try:
                averaging_time = int((setting_line[1]).replace("\n", ""))
            except TypeError:
                print("invalid value in averaging_time field. Must be an integer (number of hours)")
                log.error("invalid value in averaging_time field. Must be an integer (number of hours)")
        elif setting_line[0] == "historicalExchanges":
            hists = setting_line[1].split(" ")
            for h in hists:
                h = (h.replace("\n", ""))
                historicalExchanges.append(h)
            print(f"collecting history for {historicalExchanges}")
            log.error(f"collecting history for {historicalExchanges}")
        elif setting_line[0] == "historyEnd":
            try:
                historyEnd = int((setting_line[1]).replace("\n", ""))
            except TypeError:
                print("invalid value in historyEnd. Must be millisecond timestamp (integer)")
                log.error("invalid value in historyEnd. Must be millisecond timestamp (integer)")
        else:
            return
    # print statement for debugging
    len_exchanges = len(exchanges)
    if len_exchanges > exchange_limit:
        print(f"{len_exchanges} exchanges detected. Using performance mode (multithreading)")
        log.info(f"{len_exchanges} exchanges detected. Using performance mode (multithreading)")
        performance_mode = True
    print(f" Settings read:\n keepWeeks: {keepWeeks}\n exchanges: {exchanges}\n currencies: {currencies}\n interval: {interval}\n exchange_limit: {exchange_limit}\n averaging_time: {averaging_time}\n historicalExchanges: {historicalExchanges}\n historyEnd: {historyEnd}")
    log.info(f" Settings read:\n keepWeeks: {keepWeeks}\n exchanges: {exchanges}\n currencies: {currencies}\n interval: {interval}\n exchange_limit: {exchange_limit}\n averaging_time: {averaging_time}\n historicalExchanges: {historicalExchanges}\n historyEnd: {historyEnd}")


# Check for empty tables in the database
def poke_db(exchanges):
    """Return the percentage of exchange tables that contain no rows.

    FIX: the original never incremented `empties`, so `score` was always 0
    regardless of how many tables were empty.
    """
    global score
    db_n = sqlite3.connect(p)
    empties = 0
    for e in exchanges:
        statement = f"SELECT * FROM {e} ORDER BY timestamp DESC LIMIT 1;"
        c = db_n.execute(statement)
        db_n.commit()
        res = c.fetchone()
        if res is None:
            empties += 1
            print(f"{e} table is empty!")
            log.info(f"{e} table is empty!")
    score = (empties / len(exchanges)) * 100
    print(f"{score}% of tables are empty")
    return score


# Find gaps in an exchanges database back to historyEnd and create a list of those gaps as tuples
def find_gaps(exchange, currency):
    global historyEnd
    db_n = sqlite3.connect(p)
    currency = currency.upper()
    ticker = f"BTC-{currency}"
    # Probe one row to decide whether this table stores ms or s timestamps.
    statement = f"SELECT timestamp FROM {exchange} LIMIT 1;"
    c = db_n.execute(statement)
    res = c.fetchone()
    if res is not None and is_ms(int(res[0])):
        statement = f"SELECT timestamp,datetime FROM {exchange} WHERE pair = '{ticker}' AND timestamp > {historyEnd} ORDER BY timestamp;"
    else:
        statement = f"SELECT timestamp, datetime FROM {exchange} WHERE pair = '{ticker}' AND timestamp > {historyEnd / 1e3} ORDER BY timestamp;"
    c = db_n.execute(statement)
    res = c.fetchall()
    report = {}
    # later in time is higher ids
    i = 0
    key = 0
    stop = len(res)
    # TODO: make the time gap a configurable param
    while i < stop - 1:
        if res[i + 1][0] > res[i][0] + 1000000:
            report[key] = f"{res[i][0]}-{res[i+1][0]}"
            key += 1
        i += 1
    return report


# Fill gaps in a table via request_history
def backfill(report, exchange, currency):
    for key in report:
        print(f"filling gap {key}")
        rang = report[key].split("-")
        start = int(rang[0])
        end = int(rang[1])
        request_history(exchange, currency, start, end)


# This method is called at the first run.
# It sets up the required tables inside of a local sqlite3 database. There is one table for each exchange.
# Tables are only created if they do not already exist. Install will attempt to create tables for every listed exchange at once when called.
def install():
    read_config()
    # create the sqlite db
    len_exchanges = len(exchanges)
    print(f"creating tables for {len_exchanges} exchanges if they do not exist already.")
    log.info(f"creating tables for {len_exchanges} exchanges if they do not exist already.")
    for exchange in exchanges:
        sql = f"CREATE TABLE IF NOT EXISTS {exchange} (id INTEGER PRIMARY KEY AUTOINCREMENT, timestamp INTEGER, datetime TEXT, pair TEXT, open REAL, high REAL, low REAL, close REAL, volume REAL)"
        print(f"created table for {exchange}")
        log.info(f"created table for {exchange}")
        db.execute(sql)
        db.commit()
    db.close()


# Remove every entry older than now-keepWeeks from all tables in the database
# if there is nothing to prune then nothing will be pruned.
def prune(keepWeeks):
    # prune checks will run continuously and check every 60k seconds right now.
    db_n = sqlite3.connect(p, timeout=10)
    while True:
        for exchange in exchanges:
            if exchange not in historicalExchanges:
                check = f"SELECT MAX(timestamp) FROM {exchange};"
                cursor = db_n.execute(check)
                check_ts = cursor.fetchone()
                statement = ""
                if check_ts[0] is not None:
                    try:
                        # cutoff unit must match whatever the table stores (ms vs s)
                        if is_ms(int(check_ts[0])):
                            cutoff = (datetime.now() - timedelta(weeks=keepWeeks)).timestamp() * 1000
                        else:
                            cutoff = (datetime.now() - timedelta(weeks=keepWeeks)).timestamp()
                        statement = f"DELETE FROM {exchange} WHERE timestamp < {cutoff};"
                        while True:
                            try:
                                db_n.execute(statement)
                                break
                            except sqlite3.OperationalError as op:
                                log.error(f"{op}. Trying again in one hour...")
                                print(f"{op}. Trying again in one hour...")
                                time.sleep(3600)
                        db_n.commit()
                    except TypeError as te:
                        log.error(f"too early to prune {te}")
        time.sleep(60000)


if __name__ == "__main__":
    install()  # install will call read_config
    chunk_size = optimize_chunks(cpuOffset=0)
    threadResults = None
    # spin up many threads if there is a lot of exchanges present in the config file
    if performance_mode:
        # request_fast will create and start the threads automatically
        print("performance mode is ON")
        log.info("performance mode is ON")
        threadResults = request_fast(exchanges, interval, chunk_size)
    else:
        print("performance mode is OFF")
        log.info("performance mode is OFF")
        prices_thread = Thread(target=request_periodically, args=(exchanges, interval))
        prices_thread.start()
    request_history_periodically(historicalExchanges, currencies, historyEnd)
    pruning_thread = Thread(target=prune, args=[keepWeeks])
    pruning_thread.start()
    app.run()
    db.close()
# -*- coding: utf-8 -*-
"""
Configuration parsing and resolving
===================================

"""
import copy
import getpass
import hashlib
import logging
import os

import git
import yaml
from numpy import inf as infinity

import orion
import orion.core
from orion.core.io.orion_cmdline_parser import OrionCmdlineParser
from orion.core.utils.flatten import unflatten


def is_exe(path):
    """Test whether ``path`` describes an executable file."""
    return os.path.isfile(path) and os.access(path, os.X_OK)


log = logging.getLogger(__name__)


################################################################################
#                Default Settings and Environmental Variables                  #
################################################################################

# Default settings for command line arguments (option, description)
DEF_CMD_MAX_TRIALS = (infinity, "inf/until preempted")
DEF_CMD_WORKER_TRIALS = (infinity, "inf/until preempted")
DEF_CMD_POOL_SIZE = (1, str(1))

# list containing tuples of
# (environmental variable names, configuration keys, default values)
ENV_VARS_DB = [
    ("ORION_DB_NAME", "name"),
    ("ORION_DB_TYPE", "type"),
    ("ORION_DB_ADDRESS", "host"),
    ("ORION_DB_PORT", "port"),
]

# TODO: Default resource from environmental (localhost)

# dictionary describing lists of environmental tuples (e.g. `ENV_VARS_DB`)
# by a 'key' to be used in the experiment's configuration dict
ENV_VARS = dict(database=ENV_VARS_DB)


def _convert_dashes(config, ref):
    """Convert dash in keys to underscores based on a reference dict.

    The reference is used to avoid converting keys in dictionary that are
    values of options.
    """
    config = copy.deepcopy(config)
    for key in list(config.keys()):
        converted_key = key.replace("-", "_")
        if converted_key in ref:
            config[converted_key] = config.pop(key)

            # Recurse only when both sides hold a dict for this key.
            if all(isinstance(item[converted_key], dict) for item in [config, ref]):
                config[converted_key] = _convert_dashes(
                    config[converted_key], ref[converted_key]
                )

    return config


# NOTE: Silencing this pylint error for now, but seriously this function is quite horrible.
#       We'll need to clean this up at some point...
# pylint:disable=too-many-branches
def fetch_config_from_cmdargs(cmdargs):
    """Turn flat cmdargs into nested dicts like orion.core.config."""
    config_file = cmdargs.pop("config", None)
    tmp_cmdargs = copy.deepcopy(cmdargs)
    tmp_cmdargs["config"] = config_file
    cmdargs["config"] = config_file
    cmdargs = tmp_cmdargs

    cmdargs_config = {}
    if cmdargs.get("max_trials") is not None:
        log.warning(
            "--max-trials is deprecated and will be removed in v0.3. "
            "Use --exp-max-trials instead"
        )
        cmdargs_config["experiment.max_trials"] = cmdargs.pop("max_trials")

    if cmdargs.get("worker_trials") is not None:
        log.warning(
            "--worker-trials is deprecated and will be removed in v0.3. "
            "Use --worker-max-trials instead"
        )
        cmdargs_config["worker.max_trials"] = cmdargs.pop("worker_trials")

    mappings = dict(
        experiment=dict(max_broken="exp_max_broken", max_trials="exp_max_trials"),
        worker=dict(max_broken="worker_max_broken", max_trials="worker_max_trials"),
        evc=dict(enable="enable_evc"),
    )

    global_config = orion.core.config.to_dict()

    for key in ["config", "user_args"]:
        if cmdargs.get(key) not in [False, None]:
            cmdargs_config[key] = cmdargs[key]

    for key in ["name", "user", "version"]:
        if cmdargs.get(key) not in [False, None]:
            cmdargs_config[f"experiment.{key}"] = cmdargs[key]

    for key in ["branch_from", "branch_to"]:
        if cmdargs.get(key) not in [False, None]:
            cmdargs_config[f"evc.{key}"] = cmdargs[key]

    # Apply config at the root
    for key in ["debug"]:
        # Adapt to cli arguments
        cli_key = mappings.get(key, key)
        value = cmdargs.pop(cli_key, None)
        if value is not None:
            cmdargs_config[f"{key}"] = value

    # Apply to subconfigs
    for key in ["experiment", "worker", "evc"]:
        for subkey in global_config[key].keys():
            # Adapt to cli arguments
            cli_key = mappings.get(key, {}).get(subkey, subkey)
            value = cmdargs.pop(cli_key, None)
            if value is not None:
                cmdargs_config[f"{key}.{subkey}"] = value

    return unflatten(cmdargs_config)


def fetch_config(args):
    """Return the config inside the .yaml file if present."""
    orion_file = args.get("config")
    local_config = {}
    if orion_file:
        log.debug(
            "Found orion configuration file at: %s", os.path.abspath(orion_file.name)
        )
        orion_file.seek(0)
        tmp_config = yaml.safe_load(orion_file)

        global_config = orion.core.config.to_dict()

        tmp_config = _convert_dashes(tmp_config, global_config)

        # Fix deprecations first because some names are shared by experiment and worker
        max_trials = tmp_config.pop("max_trials", None)
        if max_trials is not None:
            log.warning(
                "(DEPRECATED) Option `max_trials` is deprecated "
                "and will be removed in v0.3. Use instead the option"
                "\nexperiment:\n    max_trials: %s",
                max_trials,
            )
            local_config["experiment.max_trials"] = max_trials

        worker_trials = tmp_config.get("experiment", {}).pop("worker_trials", None)
        if worker_trials is not None:
            log.warning(
                "(DEPRECATED) Option `experiment.worker_trials` is deprecated "
                "and will be removed in v0.3. Use instead the option"
                "\nworker:\n    max_trials: %s",
                worker_trials,
            )
            local_config["worker.max_trials"] = worker_trials

        worker_trials = tmp_config.pop("worker_trials", None)
        if worker_trials is not None:
            log.warning(
                "(DEPRECATED) Option `worker_trials` is deprecated "
                "and will be removed in v0.3. Use instead the option"
                "\nworker:\n    max_trials: %s",
                worker_trials,
            )
            local_config["worker.max_trials"] = worker_trials

        producer = tmp_config.pop("producer", None)
        if producer is not None:
            log.warning(
                "(DEPRECATED) Option `producer` is deprecated "
                "and will be removed in v0.3. Use instead the option"
                "\nexperiment:\n    strategy: %s",
                producer["strategy"],
            )
            local_config["experiment.strategy"] = producer["strategy"]

        producer = tmp_config.get("experiment", {}).pop("producer", None)
        if producer is not None:
            log.warning(
                "(DEPRECATED) Option `experiment.producer` is deprecated "
                "and will be removed in v0.3. Use instead the option"
                "\nexperiment:\n    strategy: %s",
                producer["strategy"],
            )
            local_config["experiment.strategy"] = producer["strategy"]

        local_config = unflatten(local_config)

        # For backward compatibility
        for key in ["storage", "experiment", "worker", "evc"]:
            subkeys = list(global_config[key].keys())

            # Arguments that are only supported locally
            if key == "experiment":
                subkeys += ["name", "version", "user"]
            elif key == "evc":
                subkeys += ["branch_from", "branch_to"]

            for subkey in subkeys:
                # Backward compatibility
                backward_value = tmp_config.pop(subkey, None)
                if backward_value is not None:
                    log.warning(
                        "(DEPRECATED) Option `%s` and will be removed in v0.3. "
                        "Use instead the option"
                        "\n%s:\n    %s:\n        %s",
                        subkey,
                        key,
                        subkey,
                        yaml.dump(backward_value, indent=6),
                    )
                value = tmp_config.get(key, {}).pop(subkey, backward_value)
                if value is not None:
                    local_config.setdefault(key, {})
                    local_config[key][subkey] = value

    return local_config


def fetch_env_vars():
    """Fetch environmental variables related to orion's managerial data."""
    env_vars = {}

    for signif, evars in ENV_VARS.items():
        env_vars[signif] = {}

        for var_name, key in evars:
            value = os.getenv(var_name)

            if value is not None:
                env_vars[signif][key] = value

    return env_vars


def fetch_metadata(user=None, user_args=None, user_script_config=None):
    """Infer rest information about the process + versioning"""
    metadata = {"user": user if user else getpass.getuser()}

    metadata["orion_version"] = orion.core.__version__

    if user_args is None:
        user_args = []

    # Trailing white space are catched by argparse as an empty argument
    if len(user_args) == 1 and user_args[0] == "":
        user_args = []

    if user_script_config is None:
        user_script_config = orion.core.config.worker.user_script_config

    cmdline_parser = OrionCmdlineParser(user_script_config)
    cmdline_parser.parse(user_args)

    if cmdline_parser.user_script:
        # TODO: Remove this, it is all in cmdline_parser now
        metadata["user_script"] = cmdline_parser.user_script
        metadata["VCS"] = infer_versioning_metadata(cmdline_parser.user_script)

    if user_args:
        metadata["user_args"] = user_args
        metadata["parser"] = cmdline_parser.get_state_dict()
        metadata["user_script_config"] = user_script_config
        metadata["priors"] = dict(cmdline_parser.priors)

    return metadata


def update_metadata(metadata):
    """Update information about the process + versioning"""
    metadata.setdefault("user", getpass.getuser())
    metadata["orion_version"] = orion.core.__version__

    if not metadata.get("user_args"):
        return metadata

    cmdline_parser = OrionCmdlineParser()
    cmdline_parser.set_state_dict(metadata["parser"])

    if cmdline_parser.user_script:
        # TODO: Remove this, it is all in cmdline_parser now
        metadata["user_script"] = cmdline_parser.user_script
        metadata["VCS"] = infer_versioning_metadata(cmdline_parser.user_script)

    return metadata


def merge_configs(*configs, differentiators=("type",)):
    """Merge configuration dictionaries following the given hierarchy

    Suppose function is called as merge_configs(A, B, C). Then any pair (key, value) in C would
    overwrite any previous value from A or B. Same apply for B over A.

    If for some pair (key, value), the value is a dictionary, then it will either overwrite previous
    value if it was not also a directory, or it will be merged following
    `merge_configs(old_value, new_value)`.

    .. warning:

        Redefinition of subdictionaries may lead to confusing results because merges do not remove
        data.

        If for instance, we have {'a': {'b': 1, 'c': 2}} and we would like to update `'a'` such that
        it only have `{'c': 3}`, it won't work with {'a': {'c': 3}}.

        merge_configs({'a': {'b': 1, 'c': 2}}, {'a': {'c': 3}}) -> {'a': {'b': 1, 'c': 3}}

    Examples
    --------
    .. code-block:: python
        :linenos:

        a = {'a': 1, 'b': {'c': 2}}
        b = {'b': {'c': 3}}
        c = {'b': {'c': {'d': 4}}}

        m = resolve_config.merge_configs(a, b, c)

        assert m == {'a': 1, 'b': {'c': {'d': 4}}}

        a = {'a': 1, 'b': {'c': 2, 'd': 3}}
        b = {'b': {'c': 4}}
        c = {'b': {'c': {'e': 5}}}

        m = resolve_config.merge_configs(a, b, c)

        assert m == {'a': 1, 'b': {'c': {'e': 5}, 'd': 3}}

    """
    merged_config = configs[0]

    def _can_be_merged(dict_a, dict_b):
        for differentiator in differentiators:
            if dict_a.get(differentiator, None) and dict_a[
                differentiator
            ] != dict_b.get(differentiator, None):
                return False

        return True

    for config_i in configs[1:]:
        for key, value in config_i.items():
            if (
                isinstance(value, dict)
                and isinstance(merged_config.get(key), dict)
                and _can_be_merged(merged_config[key], value)
            ):
                merged_config[key] = merge_configs(
                    merged_config[key], value, differentiators=differentiators
                )
            elif value is not None:
                merged_config[key] = value

    return merged_config


def fetch_user_repo(user_script):
    """Fetch the GIT repo and its root path given user's script."""
    dir_path = os.path.dirname(os.path.abspath(user_script))
    try:
        git_repo = git.Repo(dir_path, search_parent_directories=True)
    except git.exc.InvalidGitRepositoryError:
        git_repo = None
        logging.warning(
            "Script %s is not in a git repository. Code modification "
            "won't be detected.",
            os.path.abspath(user_script),
        )
    return git_repo


def infer_versioning_metadata(user_script):
    """
    Infer information about user's script versioning if available.
    Fills the following information in VCS:

    `is_dirty` shows whether the git repo is at a clean state.
    `HEAD_sha` gives the hash of head of the repo.
    `active_branch` shows the active branch of the repo.
    `diff_sha` shows the hash of the diff in the repo.

    :returns: the `VCS` but filled with above info.
    """
    git_repo = fetch_user_repo(user_script)
    if not git_repo:
        return {}
    if not git_repo.head.is_valid():
        # FIX: the original f-string reused double quotes inside its replacement
        # field (`rev_parse("--show-toplevel")`), which is a SyntaxError on
        # Python < 3.12 (quote reuse was only allowed by PEP 701). Use lazy
        # %-style logging arguments instead.
        logging.warning(
            "Repository at %s has an invalid HEAD. No commits maybe?",
            git_repo.git.rev_parse("--show-toplevel"),
        )
        return {}
    vcs = {}
    vcs["type"] = "git"
    vcs["is_dirty"] = git_repo.is_dirty()
    vcs["HEAD_sha"] = git_repo.head.object.hexsha
    if git_repo.head.is_detached:
        vcs["active_branch"] = None
    else:
        vcs["active_branch"] = git_repo.active_branch.name
    # The 'diff' of the current version from the latest commit
    diff = git_repo.git.diff(git_repo.head.commit.tree).encode("utf-8")
    diff_sha = hashlib.sha256(diff).hexdigest()
    vcs["diff_sha"] = diff_sha
    return vcs
# -*- coding: utf-8 -*- """ Configuration parsing and resolving =================================== """ import copy import getpass import hashlib import logging import os import git import yaml from numpy import inf as infinity import orion import orion.core from orion.core.io.orion_cmdline_parser import OrionCmdlineParser from orion.core.utils.flatten import unflatten def is_exe(path): """Test whether ``path`` describes an executable file.""" return os.path.isfile(path) and os.access(path, os.X_OK) log = logging.getLogger(__name__) ################################################################################ # Default Settings and Environmental Variables # ################################################################################ # Default settings for command line arguments (option, description) DEF_CMD_MAX_TRIALS = (infinity, "inf/until preempted") DEF_CMD_WORKER_TRIALS = (infinity, "inf/until preempted") DEF_CMD_POOL_SIZE = (1, str(1)) # list containing tuples of # (environmental variable names, configuration keys, default values) ENV_VARS_DB = [ ("ORION_DB_NAME", "name"), ("ORION_DB_TYPE", "type"), ("ORION_DB_ADDRESS", "host"), ("ORION_DB_PORT", "port"), ] # TODO: Default resource from environmental (localhost) # dictionary describing lists of environmental tuples (e.g. `ENV_VARS_DB`) # by a 'key' to be used in the experiment's configuration dict ENV_VARS = dict(database=ENV_VARS_DB) def _convert_dashes(config, ref): """Convert dash in keys to underscores based on a reference dict. The reference is used to avoid converting keys in dictionary that are values of options. 
""" config = copy.deepcopy(config) for key in list(config.keys()): converted_key = key.replace("-", "_") if converted_key in ref: config[converted_key] = config.pop(key) if all(isinstance(item[converted_key], dict) for item in [config, ref]): config[converted_key] = _convert_dashes( config[converted_key], ref[converted_key] ) return config # NOTE: Silencing this pylint error for now, but seriously this function is quite horrible. # We'll need to clean this up at some point... # pylint:disable=too-many-branches def fetch_config_from_cmdargs(cmdargs): """Turn flat cmdargs into nested dicts like orion.core.config.""" config_file = cmdargs.pop("config", None) tmp_cmdargs = copy.deepcopy(cmdargs) tmp_cmdargs["config"] = config_file cmdargs["config"] = config_file cmdargs = tmp_cmdargs cmdargs_config = {} if cmdargs.get("max_trials") is not None: log.warning( "--max-trials is deprecated and will be removed in v0.3. " "Use --exp-max-trials instead" ) cmdargs_config["experiment.max_trials"] = cmdargs.pop("max_trials") if cmdargs.get("worker_trials") is not None: log.warning( "--worker-trials is deprecated and will be removed in v0.3. 
" "Use --worker-max-trials instead" ) cmdargs_config["worker.max_trials"] = cmdargs.pop("worker_trials") mappings = dict( experiment=dict(max_broken="exp_max_broken", max_trials="exp_max_trials"), worker=dict(max_broken="worker_max_broken", max_trials="worker_max_trials"), evc=dict(enable="enable_evc"), ) global_config = orion.core.config.to_dict() for key in ["config", "user_args"]: if cmdargs.get(key) not in [False, None]: cmdargs_config[key] = cmdargs[key] for key in ["name", "user", "version"]: if cmdargs.get(key) not in [False, None]: cmdargs_config[f"experiment.{key}"] = cmdargs[key] for key in ["branch_from", "branch_to"]: if cmdargs.get(key) not in [False, None]: cmdargs_config[f"evc.{key}"] = cmdargs[key] # Apply config at the root for key in ["debug"]: # Adapt to cli arguments cli_key = mappings.get(key, key) value = cmdargs.pop(cli_key, None) if value is not None: cmdargs_config[f"{key}"] = value # Apply to subconfigs for key in ["experiment", "worker", "evc"]: for subkey in global_config[key].keys(): # Adapt to cli arguments cli_key = mappings.get(key, {}).get(subkey, subkey) value = cmdargs.pop(cli_key, None) if value is not None: cmdargs_config[f"{key}.{subkey}"] = value return unflatten(cmdargs_config) def fetch_config(args): """Return the config inside the .yaml file if present.""" orion_file = args.get("config") local_config = {} if orion_file: log.debug( "Found orion configuration file at: %s", os.path.abspath(orion_file.name) ) orion_file.seek(0) tmp_config = yaml.safe_load(orion_file) global_config = orion.core.config.to_dict() tmp_config = _convert_dashes(tmp_config, global_config) # Fix deprecations first because some names are shared by experiment and worker max_trials = tmp_config.pop("max_trials", None) if max_trials is not None: log.warning( "(DEPRECATED) Option `max_trials` is deprecated " "and will be removed in v0.3. 
Use instead the option" "\nexperiment:\n max_trials: %s", max_trials, ) local_config["experiment.max_trials"] = max_trials worker_trials = tmp_config.get("experiment", {}).pop("worker_trials", None) if worker_trials is not None: log.warning( "(DEPRECATED) Option `experiment.worker_trials` is deprecated " "and will be removed in v0.3. Use instead the option" "\nworker:\n max_trials: %s", worker_trials, ) local_config["worker.max_trials"] = worker_trials worker_trials = tmp_config.pop("worker_trials", None) if worker_trials is not None: log.warning( "(DEPRECATED) Option `worker_trials` is deprecated " "and will be removed in v0.3. Use instead the option" "\nworker:\n max_trials: %s", worker_trials, ) local_config["worker.max_trials"] = worker_trials producer = tmp_config.pop("producer", None) if producer is not None: log.warning( "(DEPRECATED) Option `producer` is deprecated " "and will be removed in v0.3. Use instead the option" "\nexperiment:\n strategy: %s", producer["strategy"], ) local_config["experiment.strategy"] = producer["strategy"] producer = tmp_config.get("experiment", {}).pop("producer", None) if producer is not None: log.warning( "(DEPRECATED) Option `experiment.producer` is deprecated " "and will be removed in v0.3. Use instead the option" "\nexperiment:\n strategy: %s", producer["strategy"], ) local_config["experiment.strategy"] = producer["strategy"] local_config = unflatten(local_config) # For backward compatibility for key in ["storage", "experiment", "worker", "evc"]: subkeys = list(global_config[key].keys()) # Arguments that are only supported locally if key == "experiment": subkeys += ["name", "version", "user"] elif key == "evc": subkeys += ["branch_from", "branch_to"] for subkey in subkeys: # Backward compatibility backward_value = tmp_config.pop(subkey, None) if backward_value is not None: log.warning( "(DEPRECATED) Option `%s` and will be removed in v0.3. 
" "Use instead the option" "\n%s:\n %s:\n %s", subkey, key, subkey, yaml.dump(backward_value, indent=6), ) value = tmp_config.get(key, {}).pop(subkey, backward_value) if value is not None: local_config.setdefault(key, {}) local_config[key][subkey] = value return local_config def fetch_env_vars(): """Fetch environmental variables related to orion's managerial data.""" env_vars = {} for signif, evars in ENV_VARS.items(): env_vars[signif] = {} for var_name, key in evars: value = os.getenv(var_name) if value is not None: env_vars[signif][key] = value return env_vars def fetch_metadata(user=None, user_args=None, user_script_config=None): """Infer rest information about the process + versioning""" metadata = {"user": user if user else getpass.getuser()} metadata["orion_version"] = orion.core.__version__ if user_args is None: user_args = [] # Trailing white space are catched by argparse as an empty argument if len(user_args) == 1 and user_args[0] == "": user_args = [] if user_script_config is None: user_script_config = orion.core.config.worker.user_script_config cmdline_parser = OrionCmdlineParser(user_script_config) cmdline_parser.parse(user_args) if cmdline_parser.user_script: # TODO: Remove this, it is all in cmdline_parser now metadata["user_script"] = cmdline_parser.user_script metadata["VCS"] = infer_versioning_metadata(cmdline_parser.user_script) if user_args: metadata["user_args"] = user_args metadata["parser"] = cmdline_parser.get_state_dict() metadata["user_script_config"] = user_script_config metadata["priors"] = dict(cmdline_parser.priors) return metadata def update_metadata(metadata): """Update information about the process + versioning""" metadata.setdefault("user", getpass.getuser()) metadata["orion_version"] = orion.core.__version__ if not metadata.get("user_args"): return metadata cmdline_parser = OrionCmdlineParser() cmdline_parser.set_state_dict(metadata["parser"]) if cmdline_parser.user_script: # TODO: Remove this, it is all in cmdline_parser now 
metadata["user_script"] = cmdline_parser.user_script metadata["VCS"] = infer_versioning_metadata(cmdline_parser.user_script) return metadata def merge_configs(*configs, differentiators=("type",)): """Merge configuration dictionaries following the given hierarchy Suppose function is called as merge_configs(A, B, C). Then any pair (key, value) in C would overwrite any previous value from A or B. Same apply for B over A. If for some pair (key, value), the value is a dictionary, then it will either overwrite previous value if it was not also a directory, or it will be merged following `merge_configs(old_value, new_value)`. .. warning: Redefinition of subdictionaries may lead to confusing results because merges do not remove data. If for instance, we have {'a': {'b': 1, 'c': 2}} and we would like to update `'a'` such that it only have `{'c': 3}`, it won't work with {'a': {'c': 3}}. merge_configs({'a': {'b': 1, 'c': 2}}, {'a': {'c': 3}}) -> {'a': {'b': 1, 'c': 3}} Examples -------- .. code-block:: python :linenos: a = {'a': 1, 'b': {'c': 2}} b = {'b': {'c': 3}} c = {'b': {'c': {'d': 4}}} m = resolve_config.merge_configs(a, b, c) assert m == {'a': 1, 'b': {'c': {'d': 4}}} a = {'a': 1, 'b': {'c': 2, 'd': 3}} b = {'b': {'c': 4}} c = {'b': {'c': {'e': 5}}} m = resolve_config.merge_configs(a, b, c) assert m == {'a': 1, 'b': {'c': {'e': 5}, 'd': 3}} """ merged_config = configs[0] def _can_be_merged(dict_a, dict_b): for differentiator in differentiators: if dict_a.get(differentiator, None) and dict_a[ differentiator ] != dict_b.get(differentiator, None): return False return True for config_i in configs[1:]: for key, value in config_i.items(): if ( isinstance(value, dict) and isinstance(merged_config.get(key), dict) and _can_be_merged(merged_config[key], value) ): merged_config[key] = merge_configs( merged_config[key], value, differentiators=differentiators ) elif value is not None: merged_config[key] = value return merged_config def fetch_user_repo(user_script): """Fetch the 
    GIT repo and its root path given user's script."""
    dir_path = os.path.dirname(os.path.abspath(user_script))
    try:
        git_repo = git.Repo(dir_path, search_parent_directories=True)
    except git.exc.InvalidGitRepositoryError:
        # Not a git checkout: versioning info simply won't be recorded.
        git_repo = None
        logging.warning(
            "Script %s is not in a git repository. Code modification "
            "won't be detected.",
            os.path.abspath(user_script),
        )
    return git_repo


def infer_versioning_metadata(user_script):
    """
    Infer information about user's script versioning if available.

    Fills the following information in VCS:

    `is_dirty` shows whether the git repo is at a clean state.
    `HEAD_sha` gives the hash of head of the repo.
    `active_branch` shows the active branch of the repo.
    `diff_sha` shows the hash of the diff in the repo.

    :returns: the `VCS` but filled with above info.
    """
    git_repo = fetch_user_repo(user_script)

    if not git_repo:
        return {}

    if not git_repo.head.is_valid():
        logging.warning(
            f"Repository at {git_repo.git.rev_parse('--show-toplevel')} has an invalid HEAD. "
            "No commits maybe?"
        )
        return {}

    vcs = {}
    vcs["type"] = "git"
    vcs["is_dirty"] = git_repo.is_dirty()
    vcs["HEAD_sha"] = git_repo.head.object.hexsha
    # A detached HEAD has no branch name.
    if git_repo.head.is_detached:
        vcs["active_branch"] = None
    else:
        vcs["active_branch"] = git_repo.active_branch.name

    # The 'diff' of the current version from the latest commit
    diff = git_repo.git.diff(git_repo.head.commit.tree).encode("utf-8")
    diff_sha = hashlib.sha256(diff).hexdigest()
    vcs["diff_sha"] = diff_sha
    return vcs
import copy
import json
import os
import random
import ssl
import sys
from binascii import b2a_hex
from datetime import datetime
from json import JSONDecodeError

import click
import requests
from apscheduler.schedulers.blocking import BlockingScheduler
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePublicKey, EllipticCurvePrivateKey
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
from cryptography.hazmat.primitives.serialization import load_pem_private_key, load_pem_public_key
from paho.mqtt import client as paho
from tinydb import where, Query
from tinydb.operations import set, delete

# Silence stdout while extending sys.path for the un-packaged layout.
# NOTE(review): the devnull handle is never closed — presumably acceptable for a CLI's lifetime.
sys.stdout = open(os.devnull, 'w')
sys.path.insert(0, '../app')
sys.stdout = sys.__stdout__

try:  # for packaged CLI (setup.py)
    from client.crypto_utils import correctness_hash, check_correctness_hash, int_to_bytes, instantiate_ope_cipher, int_from_bytes, hex_to_key, \
        key_to_hex, hex_to_fernet, hex_to_ope, decrypt_using_fernet_hex, decrypt_using_ope_hex, encrypt_using_fernet_hex, murmur_hash, \
        decrypt_using_abe_serialized_key, blind_index, unpad_row, pad_payload_attr, unpad_payload_attr
    from client.utils import json_string_with_bytes_to_dict, _create_payload, search_tinydb_doc, get_tinydb_table, insert_into_tinydb, \
        get_shared_key_by_device_id, bytes_to_json, is_number
    from client.password_hashing import pbkdf2_hash
except ImportError:  # pragma: no un-packaged CLI cover
    from crypto_utils import correctness_hash, check_correctness_hash, instantiate_ope_cipher, int_from_bytes, hex_to_key, key_to_hex, \
        hex_to_fernet, hex_to_ope, decrypt_using_fernet_hex, decrypt_using_ope_hex, encrypt_using_fernet_hex, murmur_hash, \
        decrypt_using_abe_serialized_key, blind_index, unpad_row, pad_payload_attr, unpad_payload_attr
    from utils import json_string_with_bytes_to_dict, _create_payload, search_tinydb_doc, get_tinydb_table, insert_into_tinydb, \
        get_shared_key_by_device_id, bytes_to_json, is_number
    from password_hashing import pbkdf2_hash

# REST endpoints of the main application server.
URL_BASE = "https://localhost/api/"
URL_PUBLISH = URL_BASE + "publish"
URL_CREATE_DEVICE_TYPE = URL_BASE + "device_type/create"
URL_CREATE_DEVICE = URL_BASE + "device/create"
URL_CREATE_SCENE = URL_BASE + "scene/create"
URL_ADD_ACTION_TO_SCENE = URL_BASE + "scene/add_action"
URL_SET_ACTION = URL_BASE + "device/set_action"
URL_TRIGGER_ACTION = URL_BASE + "device/action"
URL_TRIGGER_SCENE = URL_BASE + "scene/trigger"
URL_AUTHORIZE_USER = URL_BASE + "device/authorize"
URL_REVOKE_USER = URL_BASE + "device/revoke"
URL_GET_DEVICE = URL_BASE + "device/get"
URL_GET_DEVICE_DATA_BY_RANGE = URL_BASE + "data/get_by_num_range"
URL_GET_DEVICE_DATA = URL_BASE + "data/get_device_data"
URL_START_KEY_EXCHANGE = URL_BASE + "exchange_session_keys"
URL_RECEIVE_PUBLIC_KEY = URL_BASE + "retrieve_public_key"
URL_REGISTER_TO_BROKER = URL_BASE + "user/broker_register"
URL_DELETE_ACCOUNT = "https://localhost/delete_account"

# REST endpoints of the attribute authority (ABE).
AA_URL_BASE = "https://localhost/attr_auth/"
AA_URL_SET_USERNAME = AA_URL_BASE + "set_username"
AA_URL_DELETE_ACCOUNT = AA_URL_BASE + "delete_account"
AA_URL_SETUP = AA_URL_BASE + "setup"
AA_URL_KEYGEN = AA_URL_BASE + "user/keygen"
AA_URL_DEVICE_KEYGEN = AA_URL_BASE + "device/keygen"
AA_URL_SK_RETRIEVE = AA_URL_BASE + "user/retrieve_private_keys"
AA_URL_ENCRYPT = AA_URL_BASE + "encrypt"
AA_URL_DECRYPT = AA_URL_BASE + "decrypt"

dir_path = os.path.dirname(os.path.realpath(__file__))
path = f'{dir_path}/keystore.json'  # local TinyDB keystore for all client-side keys

# Populated by the MQTT on_message callback in _get_fake_tuple_data.
fake_tuple_data = None


@click.group()
@click.pass_context
def user(ctx):
    # Sub-group of user-facing commands; connection settings come from the root context.
    global VERIFY_CERTS
    global MQTT_BROKER
    global MQTT_PORT
    VERIFY_CERTS = ctx.obj['VERIFY_CERTS']
    MQTT_BROKER = ctx.obj['BROKER']
    MQTT_PORT = ctx.obj['PORT']


@user.command()
@click.argument('user_id')
@click.argument('device_id')
@click.argument('data')
def send_message(user_id, device_id, data):
    # Encrypt `data` with the device's shared Fernet key and publish it over MQTT.
    doc = search_tinydb_doc(path, 'device_keys',
Query().device_id == device_id) if not doc: with click.Context(send_key_to_device) as ctx: click.echo(f"Keys for device {device_id} not present, please use: {ctx.command.name}") click.echo(send_key_to_device.get_help(ctx)) return fernet_key = hex_to_fernet(doc["shared_key"]) token = fernet_key.encrypt(data.encode()) client = _setup_client(user_id) payload = f'"{json.dumps(_create_payload({'ciphertext': token.decode()}, user_id))}"' ret = client.publish(f"u:{user_id}/d:{device_id}/", payload) click.echo(f"RC and MID = {ret}") @user.command() @click.argument('password') @click.option('--token', envvar='ACCESS_TOKEN') def register_to_broker(password, token): password_hash = pbkdf2_hash(password) data = {"password": password_hash} r = requests.post(URL_REGISTER_TO_BROKER, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS) content = json.loads(r.content.decode('unicode-escape')) table = get_tinydb_table(path, 'credentials') table.upsert({ "broker_id": content["broker_id"], "broker_password": password }, Query().broker_id == content["broker_id"]) click.echo(r.content.decode('unicode-escape')) @user.command() @click.option('--token', envvar='ACCESS_TOKEN') @click.option('--aa/--server') def delete_account(token, aa): """Triggered by: ./cli.py -b "172.26.0.8" user delete-account --token "7jagPr4edVdghcsBNkjd23))" --aa ./cli.py -b "172.26.0.8" user delete-account --token 5c36ab84439c55a3c196f4csd9bd7b3d9291f39g --server """ if aa: url = AA_URL_DELETE_ACCOUNT else: url = URL_DELETE_ACCOUNT r = requests.post(url, headers={"Authorization": token}, verify=VERIFY_CERTS) content = json.loads(r.content.decode('unicode-escape')) click.echo(content) @user.command() @click.argument('description') @click.option('--token', envvar='ACCESS_TOKEN') def create_device_type(description, token): if len(get_tinydb_table(path, 'device_type_keys')) == 0: init_device_type_keys() table = get_tinydb_table(path, 'device_type_keys') doc = table.all()[0] desc_ciphertext = 
        encrypt_using_fernet_hex(doc["description"], description)
    data = {"description": desc_ciphertext, "correctness_hash": correctness_hash(description)}
    r = requests.post(URL_CREATE_DEVICE_TYPE, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS)
    click.echo(r.content.decode('unicode-escape'))


@user.command()
@click.argument('device_type_id')
@click.argument('device_name')
@click.argument('password')
@click.option('--token', envvar='ACCESS_TOKEN')
def create_device(device_type_id, device_name, password, token):
    # Generate per-device secrets: a blind-index key plus per-column Fernet keys.
    password_hash = pbkdf2_hash(password)
    bi_key = os.urandom(32)
    device_name_key = key_to_hex(os.urandom(32))  # NOTE: retrieve key as `key_to_hex(key)`
    device_status_key = key_to_hex(os.urandom(32))
    data = {
        "type_id": device_type_id,
        # NOTE(review): `encrypt` returns bytes here while sibling endpoints send
        # `.decode()`d text — confirm the server accepts bytes for "name".
        "name": hex_to_fernet(device_name_key).encrypt(device_name.encode()),
        "correctness_hash": correctness_hash(device_name),
        "name_bi": blind_index(bi_key, device_name),
        "password": password_hash
    }
    r = requests.post(URL_CREATE_DEVICE, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS)
    content = json.loads(r.content.decode('unicode-escape'))
    if content["success"]:
        # Persist keys only once the server confirmed device creation.
        insert_into_tinydb(path, 'device_keys', {
            'device_id': str(content["id"]),
            'bi_key': key_to_hex(bi_key),
            'device:name': device_name_key,
            'device:status': device_status_key
        })
    click.echo(r.content.decode('unicode-escape'))


@user.command()
@click.argument('name')
@click.argument('description')
@click.option('--token', envvar='ACCESS_TOKEN')
def create_scene(name, description, token):
    if not is_global_bi_key_missing(init_global_keys, "Blind index key for scene name is missing"):
        # Lazily create the scene keys on first use.
        if len(get_tinydb_table(path, 'scene_keys')) == 0:
            init_scene_keys()
        table = get_tinydb_table(path, 'scene_keys')
        doc = table.all()[0]
        name_ciphertext = encrypt_using_fernet_hex(doc["name"], name)
        desc_ciphertext = encrypt_using_fernet_hex(doc["description"], description)
        data = {
            "name": name_ciphertext,
            "correctness_hash": correctness_hash(name),
            "name_bi":
                blind_index(get_global_bi_key(), name),
            "description": desc_ciphertext
        }
        r = requests.post(URL_CREATE_SCENE, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS)
        click.echo(r.content.decode('unicode-escape'))


def init_scene_keys():
    # Create (or keep, if already present) the symmetric keys for scene columns.
    table = get_tinydb_table(path, 'scene_keys')
    table.upsert({
        'name': key_to_hex(os.urandom(32)),
        'description': key_to_hex(os.urandom(32))
    }, where('name').exists() & where('description').exists())


def init_device_type_keys():
    # Create (or keep) the symmetric key for device-type descriptions.
    table = get_tinydb_table(path, 'device_type_keys')
    table.upsert({
        'description': key_to_hex(os.urandom(32)),
    }, where('description').exists())


@user.command()
def init_global_keys():
    # Create (or keep) the global blind-index and scene keys.
    table = get_tinydb_table(path, 'global')
    table.upsert({
        'bi_key': key_to_hex(os.urandom(32)),
        'scene_key': key_to_hex(os.urandom(32)),
    }, where('bi_key').exists() & where('scene_key').exists())


@user.command()
@click.argument('scene_name')
@click.argument('action_name')
@click.argument('device_id')
@click.option('--token', envvar='ACCESS_TOKEN')
def add_scene_action(scene_name, action_name, device_id, token):
    # Link an existing action to a scene; both are referenced by blind index only.
    if not is_global_bi_key_missing(create_device, "Blind index key for scene name is missing"):
        data = {
            "scene_name_bi": blind_index(get_global_bi_key(), scene_name),
            "action_name_bi": blind_index(get_device_bi_key(device_id), action_name),
        }
        r = requests.post(URL_ADD_ACTION_TO_SCENE, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS)
        click.echo(r.content.decode('unicode-escape'))


@user.command()
@click.argument('device_id')
@click.argument('name')
@click.option('--token', envvar='ACCESS_TOKEN')
def set_action(device_id, name, token):
    doc = search_tinydb_doc(path, 'device_keys', Query().device_id == str(device_id))
    if not doc:
        with click.Context(send_column_keys) as ctx:
            click.echo(f"Keys for device {device_id} not present, please use: {ctx.command.name}")
            click.echo(send_column_keys.get_help(ctx))
        return
    data = {
        "device_id": device_id,
        "name": encrypt_using_fernet_hex(doc["action:name"], name),
"correctness_hash": correctness_hash(name), "name_bi": blind_index(get_device_bi_key(device_id), name) } r = requests.post(URL_SET_ACTION, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS) click.echo(r.content.decode('unicode-escape')) @user.command() @click.argument('device_id') @click.argument('device_name') @click.argument('name') @click.option('--token', envvar='ACCESS_TOKEN') @click.option('--real/--fake', default=True) @click.option('--owner/--no-owner', default=True) def trigger_action(device_id, device_name, name, token, real, owner): if owner: r = _trigger_action_by_owner(device_id, device_name, name, token, real) else: r = _trigger_action_by_nonowner(device_id, device_name, name, token) click.echo(r.content.decode('unicode-escape')) def _trigger_action_by_owner(device_id, device_name, name, token, real): data = { "device_name_bi": blind_index(get_device_bi_key(device_id), device_name), "name_bi": blind_index(get_device_bi_key(device_id), name), } if real: data["additional_data"] = "real" else: data["additional_data"] = "fake" data["additional_data"] = encrypt_using_fernet_hex(get_shared_key_by_device_id(path, device_id), data["additional_data"]).decode() return requests.get(URL_TRIGGER_ACTION, headers={"Authorization": token}, params=data, verify=VERIFY_CERTS) def _trigger_action_by_nonowner(device_id, device_name, name, token): data = { "device_name_bi": blind_index(get_device_bi_key(device_id), device_name), "name_bi": blind_index(get_device_bi_key(device_id), name), "additional_data": b2a_hex(os.urandom(32)).decode() } return requests.get(URL_TRIGGER_ACTION, headers={"Authorization": token}, params=data, verify=VERIFY_CERTS) @user.command() @click.argument('device_id') @click.argument('device_name') @click.argument('start', type=click.DateTime()) @click.argument('end', type=click.DateTime()) @click.argument('number', type=int) @click.argument('action_names', nargs=-1) @click.option('--token', envvar='ACCESS_TOKEN') def 
    schedule_fake_actions(device_id, device_name, start, end, number, action_names, token):
    # Schedule `number` fake (chaff) action triggers at random times in [start, end].
    if (start < datetime.now() or end < datetime.now()) or start > end:
        click.echo("Invalid start or end time supplied.")
        return
    td = end - start
    times = sorted([random.random() * td for _ in range(number)])
    actions = random.choices(action_names, k=number)
    sched = BlockingScheduler()
    for t, a in zip(times, actions):
        # `False` -> mark the trigger as fake.
        sched.add_job(_trigger_action_by_owner, 'date', [device_id, device_name, a, token, False], run_date=start+t)
    sched.start()


@user.command()
@click.argument('name')
@click.option('--token', envvar='ACCESS_TOKEN')
@click.option('--real/--fake', default=True)
def trigger_scene(name, token, real):
    # Same real/fake marker scheme as single actions, but under the global scene key.
    data = {
        "name_bi": blind_index(get_global_bi_key(), name)
    }
    if real:
        data["additional_data"] = "real"
    else:
        data["additional_data"] = "fake"
    data["additional_data"] = encrypt_using_fernet_hex(key_to_hex(get_global_scene_key()), data["additional_data"]).decode()
    r = requests.get(URL_TRIGGER_SCENE, headers={"Authorization": token}, params=data, verify=VERIFY_CERTS)
    click.echo(r.content.decode('unicode-escape'))


@user.command()
@click.argument('device_id')
@click.argument('device_name')
@click.argument('auth_user_id')
@click.option('--token', envvar='ACCESS_TOKEN')
def authorize_user(device_id, device_name, auth_user_id, token):
    # Grant another user access to this device (device referenced by blind index).
    data = {
        "device_name_bi": blind_index(get_device_bi_key(device_id), device_name),
        "auth_user_id": auth_user_id
    }
    r = requests.post(URL_AUTHORIZE_USER, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS)
    click.echo(r.content.decode('unicode-escape'))


@user.command()
@click.argument('device_id')
@click.argument('device_name')
@click.argument('revoke_user_id')
@click.option('--token', envvar='ACCESS_TOKEN')
def revoke_user(device_id, device_name, revoke_user_id, token):
    # Revoke a previously granted user's access to this device.
    data = {
        "device_name_bi": blind_index(get_device_bi_key(device_id), device_name),
        "revoke_user_id": revoke_user_id
    }
    r = requests.post(URL_REVOKE_USER, headers={"Authorization":
token}, data=data, verify=VERIFY_CERTS) click.echo(r.content.decode('unicode-escape')) @user.command() @click.argument('device_name') @click.argument('device_id') @click.option('--token', envvar='ACCESS_TOKEN') def get_devices(device_name, device_id, token): """Triggered using: ./cli.py -b "172.26.0.8" user get-devices test_device 46 --token 5c36ab84439c55a3c196f4csd9bd7b3d9291f39g""" device_name_bi = blind_index(get_device_bi_key(device_id), device_name) data = {"name_bi": device_name_bi} r = requests.get(URL_GET_DEVICE, headers={"Authorization": token}, params=data, verify=VERIFY_CERTS) content = json.loads(r.content.decode('unicode-escape')) table = get_tinydb_table(path, 'device_keys') for device in content["devices"]: ciphertext = device["name"] doc = table.get(Query().device_id == str(device["id"])) plaintext = decrypt_using_fernet_hex(doc["device:name"], ciphertext) device["name"] = plaintext.decode() check_correctness_hash(content["devices"], "name") click.echo(content["devices"]) @user.command() @click.argument('user_id') @click.argument('device_id') @click.argument('device_name') @click.option('--lower', required=False) @click.option('--upper', required=False) @click.option('--token', envvar='ACCESS_TOKEN') def get_device_data_by_num_range(user_id, device_id, device_name, lower=None, upper=None, token=""): if lower is not None and upper is not None and upper <= lower: click.echo("Upper bound needs to be greater then lower bound.") return device_name_bi = blind_index(get_device_bi_key(device_id), device_name) if lower is not None and upper is not None: data = {"lower": int(lower), "upper": int(upper), "device_name_bi": device_name_bi} elif lower is not None and upper is None: upper = 214748364700 # 100000000000 data = {"lower": int(lower), "device_name_bi": device_name_bi} elif lower is None and upper is not None: lower = -214748364800 # -100000000000 data = {"upper": int(upper), "device_name_bi": device_name_bi} else: lower = -214748364800 # -100000000000 
upper = 214748364700 # 100000000000 data = {"lower": lower, "upper": upper, "device_name_bi": device_name_bi} r = requests.get(URL_GET_DEVICE_DATA_BY_RANGE, headers={"Authorization": token}, params=data, verify=VERIFY_CERTS) content = r.content.decode('unicode-escape') json_content = json_string_with_bytes_to_dict(content) _get_fake_tuple_data(int(user_id), int(device_id)) decrypted_fake_tuple_data = { "device_data": json.loads(decrypt_using_fernet_hex(get_shared_key_by_device_id(path, device_id), fake_tuple_data["device_data"]).decode())} fake_tuples, rows = _divide_fake_and_real_data(json_content["device_data"], str(device_id), decrypted_fake_tuple_data) generated_tuples = generate_fake_tuples_in_range(decrypted_fake_tuple_data["device_data"]) expected_fake_rows = slice_by_range(generated_tuples, int(lower), int(upper), "device_data:num_data") verify_integrity_data(expected_fake_rows, fake_tuples) if json_content["success"]: check_correctness_hash(rows, 'added', 'data', 'num_data', 'tid') result = [] for row in rows: try: result.append(unpad_row("data", row)) except Exception as e: click.echo(str(e)) click.echo('{"device_data":' + str(result).replace("'", '"') + '}') def slice_by_range(all_tuples, lower, upper, key_name): result = [] for row in all_tuples: if lower <= row[key_name.split(":")[1]] <= upper: result.append(row) return result @user.command() @click.argument('device_id') @click.option('--token', envvar='ACCESS_TOKEN') def send_key_to_device(device_id, token): if not is_device_bi_key_missing(device_id, create_device, "Blind index key for device name is missing"): private_key = ec.generate_private_key(ec.SECP384R1(), default_backend()) public_key = private_key.public_key() public_pem = public_key.public_bytes( encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo ).decode('utf-8') private_pem = private_key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, 
            encryption_algorithm=serialization.NoEncryption()
        ).decode()
        table = get_tinydb_table(path, 'device_keys')
        table.upsert({
            'device_id': device_id,
            'public_key': public_pem,
            'private_key': private_pem,
            'bi_key': key_to_hex(get_device_bi_key(device_id))
        }, Query().device_id == device_id)
        data = {
            'device_id': device_id,
            'public_key': public_pem
        }
        r = requests.post(URL_START_KEY_EXCHANGE, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS)
        click.echo(r.content.decode('unicode-escape'))


@user.command()
@click.argument('device_id')
@click.option('--token', envvar='ACCESS_TOKEN')
def retrieve_device_public_key(device_id, token):
    # Complete the ECDH exchange: fetch the device's public key and derive the shared key.
    data = {
        "device_id": device_id
    }
    table = get_tinydb_table(path, 'device_keys')
    doc = table.get(Query().device_id == device_id)
    if not doc:
        with click.Context(send_key_to_device) as ctx:
            click.echo(f"Keys for device {device_id} not present, please use: {ctx.command.name}")
            click.echo(get_attr_auth_keys.get_help(ctx))
        return
    r = requests.post(URL_RECEIVE_PUBLIC_KEY, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS)
    if r.status_code != 200:
        click.echo(r.content.decode('unicode-escape'))
        return
    content = r.content.decode('unicode-escape')
    json_content = json_string_with_bytes_to_dict(content)
    private_key = load_pem_private_key(doc["private_key"].encode(), password=None, backend=default_backend())
    assert isinstance(private_key, EllipticCurvePrivateKey), "Loading private key failed! - private_key is not instance of EllipticCurvePrivateKey"
    device_public_key = load_pem_public_key(json_content["device_public_key"].encode(), backend=default_backend())
    assert isinstance(device_public_key, EllipticCurvePublicKey), "Loading public key failed! " \
- private_key is not instance of EllipticCurvePublicKey" shared_key = private_key.exchange(ec.ECDH(), device_public_key) derived_key = HKDF( algorithm=hashes.SHA256(), length=32, salt=None, info=b'handshake data', backend=default_backend() ).derive(shared_key) key = key_to_hex(derived_key) # NOTE: retrieve key as `key_to_hex(key)` table.update(delete("public_key"), Query().device_id == device_id) table.update(delete("private_key"), Query().device_id == device_id) table.update(set("shared_key", key), Query().device_id == device_id) @user.command() @click.argument('user_id') @click.argument('device_id') @click.argument('policy') def send_column_keys(user_id, device_id, policy): table = get_tinydb_table(path, 'device_keys') doc = table.get(Query().device_id == device_id) if not doc: with click.Context(send_key_to_device) as ctx: click.echo(f"Keys for device {device_id} not present, please use: {ctx.command.name}") click.echo(get_attr_auth_keys.get_help(ctx)) return fernet_key = hex_to_fernet(doc["shared_key"]) keys = { "action:name": None, "device_data:added": None, "device_data:num_data": None, "device_data:tid": None } payload_keys = {} for k in keys: random_bytes = os.urandom(32) keys[k] = key_to_hex(random_bytes) # NOTE: retrieve key as `key_to_hex(key)` payload_keys[k] = fernet_key.encrypt(random_bytes).decode() # payload_keys["device_data:data"] = fernet_key.encrypt(get_aa_public_key().encode()).decode() abe_key_and_policy = json.dumps({ "public_key": get_aa_public_key(), "policy": policy }).encode() payload_keys["device_data:data"] = fernet_key.encrypt(abe_key_and_policy).decode() payload_keys["device:name"] = fernet_key.encrypt(hex_to_key(doc["device:name"])).decode() payload_keys["device:status"] = fernet_key.encrypt(hex_to_key(doc["device:status"])).decode() payload_keys["bi_key"] = fernet_key.encrypt(hex_to_key(doc["bi_key"])).decode() payload_keys["scene_key"] = fernet_key.encrypt(get_global_scene_key()).decode() doc = {**doc, **keys} table.upsert(doc, 
                 Query().device_id == device_id)

    client = _setup_client(user_id)
    payload = f'"{json.dumps(_create_payload(payload_keys, user_id))}"'
    ret = client.publish(f"u:{user_id}/d:{device_id}/", payload)
    click.echo(f"RC and MID = {ret}")


@user.command()
@click.argument('device_id')
@click.argument('attr_list', nargs=-1)
@click.option('--token', envvar='AA_ACCESS_TOKEN')
def attr_auth_device_keygen(device_id, attr_list, token):
    # Ask the attribute authority for an ABE private key over `attr_list`.
    if device_id not in " ".join(attr_list):
        click.echo(f"attr_list argument should contain device_id ({device_id})")
        return
    doc = search_tinydb_doc(path, 'aa_keys', where('public_key').exists())
    if not doc:
        with click.Context(get_attr_auth_keys) as ctx:
            click.echo(f"Public key not present, please use: {ctx.command.name}")
            click.echo(get_attr_auth_keys.get_help(ctx))
        return
    data = {
        "attr_list": " ".join(attr_list)
    }
    r = requests.post(AA_URL_DEVICE_KEYGEN, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS)
    content = r.content.decode('unicode-escape')
    json_content = json_string_with_bytes_to_dict(content)
    if not json_content["success"]:
        click.echo(json_content)
        return
    t = get_tinydb_table(path, "device_keys")
    device_data_doc = {
        "private_key": json_content["private_key"],
        "attr_list": attr_list,
    }
    t.update(set("device_data:data", device_data_doc), Query().device_id == device_id)


@user.command()
@click.option('--token', envvar='AA_ACCESS_TOKEN')
def attr_auth_retrieve_private_keys(token):
    """Triggered by: ./cli.py -b "172.26.0.8" user attr-auth-retrieve-private-keys --token '7jagPr4edVdgvyyBNkjdaQ))'"""
    r = requests.post(AA_URL_SK_RETRIEVE, headers={"Authorization": token}, verify=VERIFY_CERTS)
    click.echo(r.content.decode('unicode-escape'))


@user.command()
@click.argument('device_id')
@click.argument('abe_pk', type=click.Path(exists=True))
@click.argument('bi_key')
@click.option('--token', envvar='AA_ACCESS_TOKEN')
def setup_authorized_device(device_id, abe_pk, bi_key, token):
    # Bootstrap the local keystore for a device the user was authorized to (non-owner).
    r = requests.post(AA_URL_SK_RETRIEVE, headers={"Authorization":
                                                                   token}, verify=VERIFY_CERTS)
    content = json.loads(r.content.decode('unicode-escape'))
    abe_sk = next((key for key in content['private_keys'] if str(key["device_id"]) == device_id), None)
    if abe_sk is None:
        click.echo(f"Key for device: {device_id} is not present.")
        return
    # Re-shape the AA response into the local keystore document layout.
    del abe_sk["key_update"]
    del abe_sk["challenger_id"]
    del abe_sk["device_id"]
    abe_sk["private_key"] = abe_sk.pop("data")
    abe_sk["attr_list"] = abe_sk.pop("attributes")
    data = {
        "device_data:data": {
            **abe_sk,
            "public_key": open(abe_pk).read().strip()
        },
        "device_id": device_id,
        "bi_key": bi_key,
    }
    insert_into_tinydb(path, 'device_keys', data)


@user.command()
@click.argument('user_id')
@click.argument('device_id')
@click.argument('device_name')
@click.option('--owner/--no-owner', default=True)
@click.option('--token', envvar='ACCESS_TOKEN')
def get_device_data(user_id, device_id, device_name, owner, token):
    """
    Queries server for data of :param device_id device and then verifies the received data
    using integrity information from device (received using MQTT Broker) and correctness
    hash attribute of each DB row.
    """
    user_id = int(user_id)
    device_name_bi = blind_index(get_device_bi_key(device_id), device_name)
    data = {"device_name_bi": device_name_bi}
    r = requests.get(URL_GET_DEVICE_DATA, headers={"Authorization": token}, params=data, verify=VERIFY_CERTS)
    content = r.content.decode('unicode-escape')
    json_content = json_string_with_bytes_to_dict(content)
    if not json_content["success"]:
        click.echo(json_content["error"])
        return
    if owner:
        # Owner path: obtain fake-tuple metadata from the device and verify chaff.
        _get_fake_tuple_data(user_id, int(device_id))
        decrypted_fake_tuple_data = {
            "device_data": json.loads(decrypt_using_fernet_hex(get_shared_key_by_device_id(path, device_id), fake_tuple_data["device_data"]).decode())}
        fake_tuples, rows = _divide_fake_and_real_data(json_content["device_data"], device_id, decrypted_fake_tuple_data)
        # NOTE: ^ Not checking for ability of user to decrypt (having SK that satisfies Ciphertext) because owner should have keys setup
        # so that he can decrypt all data from his devices
        verify_integrity_data(generate_fake_tuples_in_range(decrypted_fake_tuple_data["device_data"]), fake_tuples)
        check_correctness_hash(rows, 'added', 'data', 'num_data', 'tid')
        result = []
        for row in rows:
            try:
                result.append(unpad_row("data", row))
            except Exception as e:
                click.echo(str(e))
        click.echo(result)
    else:
        get_foreign_device_data(device_id, json_content)


def get_foreign_device_data(device_id, data):
    # Non-owner path: ABE-decrypt whatever rows the granted attributes allow.
    doc = search_tinydb_doc(path, 'device_keys', Query().device_id == str(device_id))
    if not doc:
        click.echo(f"Keys for device: {device_id} are missing. "
You are probably not authorized to use it.") decrypted = [] for row in data["device_data"]: try: decrypted.append(decrypt_using_abe_serialized_key(row["data"], doc["device_data:data"]["public_key"], doc["device_data:data"]["private_key"])) except: click.echo("Cannot decrypt row.") result = [] for val in decrypted: try: if val.endswith("0"): result.append(unpad_payload_attr(val)) except Exception as e: click.echo(str(e)) click.echo(result) def _handle_on_message(mqtt_client, userdata, msg, device_id, user_id): try: msg.payload = bytes_to_json(msg.payload) except JSONDecodeError: click.echo(f"Received invalid payload: {msg.payload.decode()}") return topic = msg.topic.split("/") t_sender, sender_id = topic[0].split(":") t_receiver, receiver_id = topic[1].split(":") if t_sender == "d" and t_receiver == "u": if is_number(sender_id) and int(sender_id) == device_id and is_number(receiver_id) and int(receiver_id) == user_id: mqtt_client.disconnect() global fake_tuple_data fake_tuple_data = msg.payload return click.echo(f"Received invalid topic: {msg.topic}") def _get_fake_tuple_data(user_id, device_id): payload_dict = {"request": "fake_tuple_info"} def on_message(mqtt_client, userdata, msg): _handle_on_message(mqtt_client, userdata, msg, device_id, user_id) client = _setup_client(str({user_id})) payload = f'"{json.dumps(_create_payload(payload_dict, user_id))}"' sub_topic = f"d:{device_id}/u:{user_id}/" client.subscribe(sub_topic) client.publish(f"u:{user_id}/d:{device_id}/", payload) client.on_message = on_message click.echo(f"Subscribed to {sub_topic}") click.echo("Waiting for response, CTRL-C to terminate...") client.loop_forever() def _divide_fake_and_real_data(rows, device_id, integrity_info): """ Split data into 2 lists based on 'fakeness' Decrypts each row and computes fake correctness hash, then tests (using bcrypt) whether `correctness_hash` of row is 'same' as computed fake correctness hash :param device_id :param rows: example: [{ 'added': 37123, 
'correctness_hash': '$2b$12$FSuBaNwezizWJcj47RxYJOpur2k49IJObfIPLDce5pKpRRZEASt6m', 'data': 'gAAAAABcUECMQMM0MjKknugGdI6YN81pLtmLUrcMsjHMBG87KpIJFWZF8n1DTVJX7VvnlVMMN4BNGdVROLeCD_I0XUs0IAK9AA==', 'device_id': 23, 'id': 1, 'num_data': -9199, 'tid': 3}, ...] """ db_col_names = ["device_data:added", "device_data:data", "device_data:tid", "device_data:num_data"] enc_keys = get_encryption_keys(device_id, db_col_names) col_types = {col: get_col_encryption_type(col, integrity_info) for col in db_col_names} key_type_pairs = {} for k, v in enc_keys.items(): if ":" in k: if k == "device_data:data": key_type_pairs[k.split(":")[1]] = [enc_keys[k][0], col_types[k], enc_keys[k][1]] # public, type, private else: key_type_pairs[k.split(":")[1]] = [enc_keys[k], col_types[k]] # private, type real, fake = [], [] for row in rows: modified = row modified.pop("id") modified.pop("device_id") modified.pop("tid_bi") row_correctness_hash = modified.pop("correctness_hash") decrypted = decrypt_row(modified, key_type_pairs) if is_fake(decrypted): decrypted["correctness_hash"] = row_correctness_hash fake.append(decrypted) else: decrypted["correctness_hash"] = row_correctness_hash real.append(decrypted) return fake, real def get_encryption_keys(device_id, db_keys): """ Retrieves encryption (decryption) keys corresponding to :param db_keys from TinyDB file :param device_id :param db_keys: list of TinyDB key names, e.g.: ["device_type:description", "action:name"] :return: dictionary of key, value pair of column name and encryption string, e.g.: {"action:name": "9dd1a57836a5...858372a8c0c42515", ...} """ doc = search_tinydb_doc(path, 'device_keys', Query().device_id == str(device_id)) result = {} for key in db_keys: if ":" in key: if key == "device_data:data": result[key] = [get_aa_public_key(), doc[key]["private_key"]] else: result[key] = doc[key] return result def get_col_encryption_type(col_name, integrity_info): """ Returns type based on whether the column is encrypted as number (OPE) or 
symmetrically (Fernet) or asymmetrically (ABE)- this is based on "type" attribute in TinyDB :param integrity_info: { 'device_data': { 'added': { 'seed': 12312412, 'lower_bound': 1, 'upper_bound': 1, 'type': "OPE" }}} :param col_name: e.g. "device_data:data" :return: """ table, col = col_name.split(":") return integrity_info[table][col]["type"] def decrypt_row(row, keys): """ :param row: example: { "added": 36976, "num_data": -9272, "data": "gAAAAABcTyUFZrhQRLzLvwep7j0Vm2UFjS2ylZ7bjB2YRueDpX15tobA0oOSEWBYZ4LaCKRa_h7WyKMacAAt-982srPPOR_1Cw==", "tid": 1 } :param keys: example: "added": ["26751017213ff85f189bedc34d302acfdf1649d5e1bac653a9709171ad37b155", "OPE"], "num_data": ["84964a963c097c550b41a085bbf1ad93ba5a1046aa5495d86d62f9623ab89cc6", "OPE"], "data": ["1fac0f8fa2083fe32c21d0PUBLIC_KEYf6959afb9f44623048e6875", "ABE", "rgesdrgPRIVATE_KEYedhder"] } """ result = {} for col, val in row.items(): if keys[col][1] == "Fernet": # if key is for fernet, create Fernet token result[col] = decrypt_using_fernet_hex(keys[col][0], val).decode() elif keys[col][1] == "OPE": # if key is for OPE, create OPE cipher result[col] = decrypt_using_ope_hex(keys[col][0], val) else: result[col] = decrypt_using_abe_serialized_key(val, keys[col][0], keys[col][2]) return result def is_fake(row_values): """ Check whether row is fake tuple based on "tid" attribute in row and computed hash from other attributes in row. 
:param row_values: dict with keys as column names and values as values from server DB (decrypted using `decrypt_row`) :return: bool """ return int(row_values["tid"]) >= 0 # Positive numbers are fake def verify_integrity_data(expected_tuples, present_rows): """ :param expected_tuples: list of dicts with keys as column names and values as values from server DB (generated fake tuples, that should be present in DB) :param present_rows: list of dicts with keys as column names and values as values from server DB (queried tuples) :return: False if any of the rows does not satisfy 'fakeness' check of if there less/more fake rows than there should be """ modified = copy.deepcopy(present_rows) for i, row in enumerate(modified): modified[i].pop("correctness_hash") if expected_tuples == modified: click.echo("Data Integrity satisfied.") else: click.echo("Data Integrity NOT satisfied.") def generate_fake_tuples_in_range(fake_tuple_info): """ Generates all fake tuples in <"lower_bound", "upper_bound"> range and verifies them againts :param fake_rows. 
:param fake_tuple_info: example: { "added": { "seed": 4574675, "lower_bound": 5, "upper_bound": 11, "is_numeric": True } :return: list of dicts (each dict contains single tuple with keys as column names and values) """ fake_tuple_col_values = {} fake_tuples = [] lb, ub = 0, 0 for col, val in fake_tuple_info.items(): lb = fake_tuple_info[col]["lower_bound"] ub = fake_tuple_info[col]["upper_bound"] if "seed" in fake_tuple_info[col]: fake_tuple_col_values[col] = [murmur_hash(str(i), fake_tuple_info[col]["seed"]) for i in range(lb, ub)] else: fake_tuple_col_values[col] = list(range(lb, ub)) for no, i in enumerate(range(lb, ub)): fake_tuples.append({"added": fake_tuple_col_values["added"][no], "num_data": fake_tuple_col_values["num_data"][no], "data": pad_payload_attr(str(fake_tuple_col_values["data"][no]), fake=True), "tid": str(fake_tuple_col_values["tid"][no])}) return fake_tuples def _setup_client(user_id): def on_publish(client, userdata, result): click.echo("Data published") client = paho.Client(user_id) client.on_publish = on_publish client.tls_set(ca_certs=os.path.join(os.path.dirname(__file__), "certs/server.crt"), certfile=None, keyfile=None, tls_version=ssl.PROTOCOL_TLSv1_2) client.tls_insecure_set(True) doc = search_tinydb_doc(path, 'credentials', where('broker_id').exists() & where('broker_password').exists()) client.username_pw_set(f"u:{doc["broker_id"]}", doc['broker_password']) client.connect(MQTT_BROKER, MQTT_PORT, 30) return client @user.command() @click.argument('username') @click.option('--token', envvar='AA_ACCESS_TOKEN') def attr_auth_set_api_username(username, token): data = {"api_username": username} r = requests.post(AA_URL_SET_USERNAME, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS) click.echo(r.content.decode('unicode-escape')) @user.command() @click.option('--token', envvar='AA_ACCESS_TOKEN') def get_attr_auth_keys(token): r = requests.get(AA_URL_SETUP, headers={"Authorization": token}, verify=VERIFY_CERTS) content = 
json.loads(r.content.decode('unicode-escape')) click.echo(f"Saving keys to {path}") table = get_tinydb_table(path, 'aa_keys') doc = table.get(where('public_key').exists()) data = {"public_key": content["public_key"]} if doc: table.update(data) else: table.insert(data) @user.command() @click.argument('api_username') @click.argument('device_id') @click.argument('attr_list', nargs=-1) @click.option('--token', envvar='AA_ACCESS_TOKEN') def attr_auth_keygen(api_username, device_id, attr_list, token): doc = search_tinydb_doc(path, 'aa_keys', where('public_key').exists()) if not doc: with click.Context(get_attr_auth_keys) as ctx: click.echo(f"Public key not present, please use: {ctx.command.name}") click.echo(get_attr_auth_keys.get_help(ctx)) return data = { "attr_list": " ".join(attr_list), "api_username": api_username, "device_id": device_id } r = requests.post(AA_URL_KEYGEN, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS) click.echo(r.content.decode('unicode-escape')) @user.command() @click.argument('message') @click.argument('policy_string') @click.option('--token', envvar='AA_ACCESS_TOKEN') def attr_auth_encrypt(message, policy_string, token): data = { "message": message, "policy_string": policy_string } r = requests.get(AA_URL_ENCRYPT, headers={"Authorization": token}, params=data, verify=VERIFY_CERTS) click.echo(r.content.decode('unicode-escape')) @user.command() @click.argument('owner_username') @click.argument('ciphertext') @click.option('--token', envvar='AA_ACCESS_TOKEN') def attr_auth_decrypt(owner_username, ciphertext, token): data = { "api_username": owner_username, "ciphertext": ciphertext } r = requests.get(AA_URL_DECRYPT, headers={"Authorization": token}, params=data, verify=VERIFY_CERTS) click.echo(r.content.decode('unicode-escape')) def is_global_bi_key_missing(command, message): doc = search_tinydb_doc(path, 'global', Query().bi_key.exists()) if not doc: with click.Context(command) as ctx: click.echo(f"{message}, please use: 
{ctx.command.name}") click.echo(command.get_help(ctx)) return True return False def get_global_bi_key(): table = get_tinydb_table(path, 'global') doc_global = table.all()[0] return hex_to_key(doc_global["bi_key"]) def get_global_scene_key(): table = get_tinydb_table(path, 'global') doc_global = table.all()[0] return hex_to_key(doc_global["scene_key"]) def get_device_bi_key(device_id): doc = search_tinydb_doc(path, 'device_keys', Query().device_id == str(device_id)) return hex_to_key(doc["bi_key"]) def get_aa_public_key(): doc = search_tinydb_doc(path, 'aa_keys', where('public_key').exists()) return doc["public_key"] def is_device_bi_key_missing(device_id, command, message): doc = search_tinydb_doc(path, 'device_keys', Query().device_id == str(device_id)) if doc is None or "bi_key" not in doc: with click.Context(command) as ctx: click.echo(f"{message}, please use: {ctx.command.name}") click.echo(command.get_help(ctx)) return True return False if __name__ == '__main__': # json_data = json.loads(create_device_type("test desc").content.decode("utf-8")) # print(json_data) # print(create_device(json_data["type_id"]).content.decode("utf-8")) # send_message() ...
import copy import json import os import random import ssl import sys from binascii import b2a_hex from datetime import datetime from json import JSONDecodeError import click import requests from apscheduler.schedulers.blocking import BlockingScheduler from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization, hashes from cryptography.hazmat.primitives.asymmetric import ec from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePublicKey, EllipticCurvePrivateKey from cryptography.hazmat.primitives.kdf.hkdf import HKDF from cryptography.hazmat.primitives.serialization import load_pem_private_key, load_pem_public_key from paho.mqtt import client as paho from tinydb import where, Query from tinydb.operations import set, delete sys.stdout = open(os.devnull, 'w') sys.path.insert(0, '../app') sys.stdout = sys.__stdout__ try: # for packaged CLI (setup.py) from client.crypto_utils import correctness_hash, check_correctness_hash, int_to_bytes, instantiate_ope_cipher, int_from_bytes, hex_to_key, \ key_to_hex, hex_to_fernet, hex_to_ope, decrypt_using_fernet_hex, decrypt_using_ope_hex, encrypt_using_fernet_hex, murmur_hash, \ decrypt_using_abe_serialized_key, blind_index, unpad_row, pad_payload_attr, unpad_payload_attr from client.utils import json_string_with_bytes_to_dict, _create_payload, search_tinydb_doc, get_tinydb_table, insert_into_tinydb, \ get_shared_key_by_device_id, bytes_to_json, is_number from client.password_hashing import pbkdf2_hash except ImportError: # pragma: no un-packaged CLI cover from crypto_utils import correctness_hash, check_correctness_hash, instantiate_ope_cipher, int_from_bytes, hex_to_key, key_to_hex, \ hex_to_fernet, hex_to_ope, decrypt_using_fernet_hex, decrypt_using_ope_hex, encrypt_using_fernet_hex, murmur_hash, \ decrypt_using_abe_serialized_key, blind_index, unpad_row, pad_payload_attr, unpad_payload_attr from utils import json_string_with_bytes_to_dict, _create_payload, 
search_tinydb_doc, get_tinydb_table, insert_into_tinydb, \ get_shared_key_by_device_id, bytes_to_json, is_number from password_hashing import pbkdf2_hash URL_BASE = "https://localhost/api/" URL_PUBLISH = URL_BASE + "publish" URL_CREATE_DEVICE_TYPE = URL_BASE + "device_type/create" URL_CREATE_DEVICE = URL_BASE + "device/create" URL_CREATE_SCENE = URL_BASE + "scene/create" URL_ADD_ACTION_TO_SCENE = URL_BASE + "scene/add_action" URL_SET_ACTION = URL_BASE + "device/set_action" URL_TRIGGER_ACTION = URL_BASE + "device/action" URL_TRIGGER_SCENE = URL_BASE + "scene/trigger" URL_AUTHORIZE_USER = URL_BASE + "device/authorize" URL_REVOKE_USER = URL_BASE + "device/revoke" URL_GET_DEVICE = URL_BASE + "device/get" URL_GET_DEVICE_DATA_BY_RANGE = URL_BASE + "data/get_by_num_range" URL_GET_DEVICE_DATA = URL_BASE + "data/get_device_data" URL_START_KEY_EXCHANGE = URL_BASE + "exchange_session_keys" URL_RECEIVE_PUBLIC_KEY = URL_BASE + "retrieve_public_key" URL_REGISTER_TO_BROKER = URL_BASE + "user/broker_register" URL_DELETE_ACCOUNT = "https://localhost/delete_account" AA_URL_BASE = "https://localhost/attr_auth/" AA_URL_SET_USERNAME = AA_URL_BASE + "set_username" AA_URL_DELETE_ACCOUNT = AA_URL_BASE + "delete_account" AA_URL_SETUP = AA_URL_BASE + "setup" AA_URL_KEYGEN = AA_URL_BASE + "user/keygen" AA_URL_DEVICE_KEYGEN = AA_URL_BASE + "device/keygen" AA_URL_SK_RETRIEVE = AA_URL_BASE + "user/retrieve_private_keys" AA_URL_ENCRYPT = AA_URL_BASE + "encrypt" AA_URL_DECRYPT = AA_URL_BASE + "decrypt" dir_path = os.path.dirname(os.path.realpath(__file__)) path = f'{dir_path}/keystore.json' fake_tuple_data = None @click.group() @click.pass_context def user(ctx): global VERIFY_CERTS global MQTT_BROKER global MQTT_PORT VERIFY_CERTS = ctx.obj['VERIFY_CERTS'] MQTT_BROKER = ctx.obj['BROKER'] MQTT_PORT = ctx.obj['PORT'] @user.command() @click.argument('user_id') @click.argument('device_id') @click.argument('data') def send_message(user_id, device_id, data): doc = search_tinydb_doc(path, 'device_keys', 
Query().device_id == device_id) if not doc: with click.Context(send_key_to_device) as ctx: click.echo(f"Keys for device {device_id} not present, please use: {ctx.command.name}") click.echo(send_key_to_device.get_help(ctx)) return fernet_key = hex_to_fernet(doc["shared_key"]) token = fernet_key.encrypt(data.encode()) client = _setup_client(user_id) payload = f'"{json.dumps(_create_payload({"ciphertext": token.decode()}, user_id))}"' ret = client.publish(f"u:{user_id}/d:{device_id}/", payload) click.echo(f"RC and MID = {ret}") @user.command() @click.argument('password') @click.option('--token', envvar='ACCESS_TOKEN') def register_to_broker(password, token): password_hash = pbkdf2_hash(password) data = {"password": password_hash} r = requests.post(URL_REGISTER_TO_BROKER, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS) content = json.loads(r.content.decode('unicode-escape')) table = get_tinydb_table(path, 'credentials') table.upsert({ "broker_id": content["broker_id"], "broker_password": password }, Query().broker_id == content["broker_id"]) click.echo(r.content.decode('unicode-escape')) @user.command() @click.option('--token', envvar='ACCESS_TOKEN') @click.option('--aa/--server') def delete_account(token, aa): """Triggered by: ./cli.py -b "172.26.0.8" user delete-account --token "7jagPr4edVdghcsBNkjd23))" --aa ./cli.py -b "172.26.0.8" user delete-account --token 5c36ab84439c55a3c196f4csd9bd7b3d9291f39g --server """ if aa: url = AA_URL_DELETE_ACCOUNT else: url = URL_DELETE_ACCOUNT r = requests.post(url, headers={"Authorization": token}, verify=VERIFY_CERTS) content = json.loads(r.content.decode('unicode-escape')) click.echo(content) @user.command() @click.argument('description') @click.option('--token', envvar='ACCESS_TOKEN') def create_device_type(description, token): if len(get_tinydb_table(path, 'device_type_keys')) == 0: init_device_type_keys() table = get_tinydb_table(path, 'device_type_keys') doc = table.all()[0] desc_ciphertext = 
encrypt_using_fernet_hex(doc["description"], description) data = {"description": desc_ciphertext, "correctness_hash": correctness_hash(description)} r = requests.post(URL_CREATE_DEVICE_TYPE, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS) click.echo(r.content.decode('unicode-escape')) @user.command() @click.argument('device_type_id') @click.argument('device_name') @click.argument('password') @click.option('--token', envvar='ACCESS_TOKEN') def create_device(device_type_id, device_name, password, token): password_hash = pbkdf2_hash(password) bi_key = os.urandom(32) device_name_key = key_to_hex(os.urandom(32)) # NOTE: retrieve key as `key_to_hex(key)` device_status_key = key_to_hex(os.urandom(32)) data = { "type_id": device_type_id, "name": hex_to_fernet(device_name_key).encrypt(device_name.encode()), "correctness_hash": correctness_hash(device_name), "name_bi": blind_index(bi_key, device_name), "password": password_hash } r = requests.post(URL_CREATE_DEVICE, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS) content = json.loads(r.content.decode('unicode-escape')) if content["success"]: insert_into_tinydb(path, 'device_keys', { 'device_id': str(content["id"]), 'bi_key': key_to_hex(bi_key), 'device:name': device_name_key, 'device:status': device_status_key }) click.echo(r.content.decode('unicode-escape')) @user.command() @click.argument('name') @click.argument('description') @click.option('--token', envvar='ACCESS_TOKEN') def create_scene(name, description, token): if not is_global_bi_key_missing(init_global_keys, "Blind index key for scene name is missing"): if len(get_tinydb_table(path, 'scene_keys')) == 0: init_scene_keys() table = get_tinydb_table(path, 'scene_keys') doc = table.all()[0] name_ciphertext = encrypt_using_fernet_hex(doc["name"], name) desc_ciphertext = encrypt_using_fernet_hex(doc["description"], description) data = { "name": name_ciphertext, "correctness_hash": correctness_hash(name), "name_bi": 
blind_index(get_global_bi_key(), name), "description": desc_ciphertext } r = requests.post(URL_CREATE_SCENE, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS) click.echo(r.content.decode('unicode-escape')) def init_scene_keys(): table = get_tinydb_table(path, 'scene_keys') table.upsert({ 'name': key_to_hex(os.urandom(32)), 'description': key_to_hex(os.urandom(32)) }, where('name').exists() & where('description').exists()) def init_device_type_keys(): table = get_tinydb_table(path, 'device_type_keys') table.upsert({ 'description': key_to_hex(os.urandom(32)), }, where('description').exists()) @user.command() def init_global_keys(): table = get_tinydb_table(path, 'global') table.upsert({ 'bi_key': key_to_hex(os.urandom(32)), 'scene_key': key_to_hex(os.urandom(32)), }, where('bi_key').exists() & where('scene_key').exists()) @user.command() @click.argument('scene_name') @click.argument('action_name') @click.argument('device_id') @click.option('--token', envvar='ACCESS_TOKEN') def add_scene_action(scene_name, action_name, device_id, token): if not is_global_bi_key_missing(create_device, "Blind index key for scene name is missing"): data = { "scene_name_bi": blind_index(get_global_bi_key(), scene_name), "action_name_bi": blind_index(get_device_bi_key(device_id), action_name), } r = requests.post(URL_ADD_ACTION_TO_SCENE, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS) click.echo(r.content.decode('unicode-escape')) @user.command() @click.argument('device_id') @click.argument('name') @click.option('--token', envvar='ACCESS_TOKEN') def set_action(device_id, name, token): doc = search_tinydb_doc(path, 'device_keys', Query().device_id == str(device_id)) if not doc: with click.Context(send_column_keys) as ctx: click.echo(f"Keys for device {device_id} not present, please use: {ctx.command.name}") click.echo(send_column_keys.get_help(ctx)) return data = { "device_id": device_id, "name": encrypt_using_fernet_hex(doc["action:name"], name), 
"correctness_hash": correctness_hash(name), "name_bi": blind_index(get_device_bi_key(device_id), name) } r = requests.post(URL_SET_ACTION, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS) click.echo(r.content.decode('unicode-escape')) @user.command() @click.argument('device_id') @click.argument('device_name') @click.argument('name') @click.option('--token', envvar='ACCESS_TOKEN') @click.option('--real/--fake', default=True) @click.option('--owner/--no-owner', default=True) def trigger_action(device_id, device_name, name, token, real, owner): if owner: r = _trigger_action_by_owner(device_id, device_name, name, token, real) else: r = _trigger_action_by_nonowner(device_id, device_name, name, token) click.echo(r.content.decode('unicode-escape')) def _trigger_action_by_owner(device_id, device_name, name, token, real): data = { "device_name_bi": blind_index(get_device_bi_key(device_id), device_name), "name_bi": blind_index(get_device_bi_key(device_id), name), } if real: data["additional_data"] = "real" else: data["additional_data"] = "fake" data["additional_data"] = encrypt_using_fernet_hex(get_shared_key_by_device_id(path, device_id), data["additional_data"]).decode() return requests.get(URL_TRIGGER_ACTION, headers={"Authorization": token}, params=data, verify=VERIFY_CERTS) def _trigger_action_by_nonowner(device_id, device_name, name, token): data = { "device_name_bi": blind_index(get_device_bi_key(device_id), device_name), "name_bi": blind_index(get_device_bi_key(device_id), name), "additional_data": b2a_hex(os.urandom(32)).decode() } return requests.get(URL_TRIGGER_ACTION, headers={"Authorization": token}, params=data, verify=VERIFY_CERTS) @user.command() @click.argument('device_id') @click.argument('device_name') @click.argument('start', type=click.DateTime()) @click.argument('end', type=click.DateTime()) @click.argument('number', type=int) @click.argument('action_names', nargs=-1) @click.option('--token', envvar='ACCESS_TOKEN') def 
schedule_fake_actions(device_id, device_name, start, end, number, action_names, token): if (start < datetime.now() or end < datetime.now()) or start > end: click.echo("Invalid start or end time supplied.") return td = end - start times = sorted([random.random() * td for _ in range(number)]) actions = random.choices(action_names, k=number) sched = BlockingScheduler() for t, a in zip(times, actions): sched.add_job(_trigger_action_by_owner, 'date', [device_id, device_name, a, token, False], run_date=start+t) sched.start() @user.command() @click.argument('name') @click.option('--token', envvar='ACCESS_TOKEN') @click.option('--real/--fake', default=True) def trigger_scene(name, token, real): data = { "name_bi": blind_index(get_global_bi_key(), name) } if real: data["additional_data"] = "real" else: data["additional_data"] = "fake" data["additional_data"] = encrypt_using_fernet_hex(key_to_hex(get_global_scene_key()), data["additional_data"]).decode() r = requests.get(URL_TRIGGER_SCENE, headers={"Authorization": token}, params=data, verify=VERIFY_CERTS) click.echo(r.content.decode('unicode-escape')) @user.command() @click.argument('device_id') @click.argument('device_name') @click.argument('auth_user_id') @click.option('--token', envvar='ACCESS_TOKEN') def authorize_user(device_id, device_name, auth_user_id, token): data = { "device_name_bi": blind_index(get_device_bi_key(device_id), device_name), "auth_user_id": auth_user_id } r = requests.post(URL_AUTHORIZE_USER, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS) click.echo(r.content.decode('unicode-escape')) @user.command() @click.argument('device_id') @click.argument('device_name') @click.argument('revoke_user_id') @click.option('--token', envvar='ACCESS_TOKEN') def revoke_user(device_id, device_name, revoke_user_id, token): data = { "device_name_bi": blind_index(get_device_bi_key(device_id), device_name), "revoke_user_id": revoke_user_id } r = requests.post(URL_REVOKE_USER, headers={"Authorization": 
token}, data=data, verify=VERIFY_CERTS) click.echo(r.content.decode('unicode-escape')) @user.command() @click.argument('device_name') @click.argument('device_id') @click.option('--token', envvar='ACCESS_TOKEN') def get_devices(device_name, device_id, token): """Triggered using: ./cli.py -b "172.26.0.8" user get-devices test_device 46 --token 5c36ab84439c55a3c196f4csd9bd7b3d9291f39g""" device_name_bi = blind_index(get_device_bi_key(device_id), device_name) data = {"name_bi": device_name_bi} r = requests.get(URL_GET_DEVICE, headers={"Authorization": token}, params=data, verify=VERIFY_CERTS) content = json.loads(r.content.decode('unicode-escape')) table = get_tinydb_table(path, 'device_keys') for device in content["devices"]: ciphertext = device["name"] doc = table.get(Query().device_id == str(device["id"])) plaintext = decrypt_using_fernet_hex(doc["device:name"], ciphertext) device["name"] = plaintext.decode() check_correctness_hash(content["devices"], "name") click.echo(content["devices"]) @user.command() @click.argument('user_id') @click.argument('device_id') @click.argument('device_name') @click.option('--lower', required=False) @click.option('--upper', required=False) @click.option('--token', envvar='ACCESS_TOKEN') def get_device_data_by_num_range(user_id, device_id, device_name, lower=None, upper=None, token=""): if lower is not None and upper is not None and upper <= lower: click.echo("Upper bound needs to be greater then lower bound.") return device_name_bi = blind_index(get_device_bi_key(device_id), device_name) if lower is not None and upper is not None: data = {"lower": int(lower), "upper": int(upper), "device_name_bi": device_name_bi} elif lower is not None and upper is None: upper = 214748364700 # 100000000000 data = {"lower": int(lower), "device_name_bi": device_name_bi} elif lower is None and upper is not None: lower = -214748364800 # -100000000000 data = {"upper": int(upper), "device_name_bi": device_name_bi} else: lower = -214748364800 # -100000000000 
upper = 214748364700 # 100000000000 data = {"lower": lower, "upper": upper, "device_name_bi": device_name_bi} r = requests.get(URL_GET_DEVICE_DATA_BY_RANGE, headers={"Authorization": token}, params=data, verify=VERIFY_CERTS) content = r.content.decode('unicode-escape') json_content = json_string_with_bytes_to_dict(content) _get_fake_tuple_data(int(user_id), int(device_id)) decrypted_fake_tuple_data = { "device_data": json.loads(decrypt_using_fernet_hex(get_shared_key_by_device_id(path, device_id), fake_tuple_data["device_data"]).decode())} fake_tuples, rows = _divide_fake_and_real_data(json_content["device_data"], str(device_id), decrypted_fake_tuple_data) generated_tuples = generate_fake_tuples_in_range(decrypted_fake_tuple_data["device_data"]) expected_fake_rows = slice_by_range(generated_tuples, int(lower), int(upper), "device_data:num_data") verify_integrity_data(expected_fake_rows, fake_tuples) if json_content["success"]: check_correctness_hash(rows, 'added', 'data', 'num_data', 'tid') result = [] for row in rows: try: result.append(unpad_row("data", row)) except Exception as e: click.echo(str(e)) click.echo('{"device_data":' + str(result).replace("'", '"') + '}') def slice_by_range(all_tuples, lower, upper, key_name): result = [] for row in all_tuples: if lower <= row[key_name.split(":")[1]] <= upper: result.append(row) return result @user.command() @click.argument('device_id') @click.option('--token', envvar='ACCESS_TOKEN') def send_key_to_device(device_id, token): if not is_device_bi_key_missing(device_id, create_device, "Blind index key for device name is missing"): private_key = ec.generate_private_key(ec.SECP384R1(), default_backend()) public_key = private_key.public_key() public_pem = public_key.public_bytes( encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo ).decode('utf-8') private_pem = private_key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, 
encryption_algorithm=serialization.NoEncryption() ).decode() table = get_tinydb_table(path, 'device_keys') table.upsert({ 'device_id': device_id, 'public_key': public_pem, 'private_key': private_pem, 'bi_key': key_to_hex(get_device_bi_key(device_id)) }, Query().device_id == device_id) data = { 'device_id': device_id, 'public_key': public_pem } r = requests.post(URL_START_KEY_EXCHANGE, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS) click.echo(r.content.decode('unicode-escape')) @user.command() @click.argument('device_id') @click.option('--token', envvar='ACCESS_TOKEN') def retrieve_device_public_key(device_id, token): data = { "device_id": device_id } table = get_tinydb_table(path, 'device_keys') doc = table.get(Query().device_id == device_id) if not doc: with click.Context(send_key_to_device) as ctx: click.echo(f"Keys for device {device_id} not present, please use: {ctx.command.name}") click.echo(get_attr_auth_keys.get_help(ctx)) return r = requests.post(URL_RECEIVE_PUBLIC_KEY, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS) if r.status_code != 200: click.echo(r.content.decode('unicode-escape')) return content = r.content.decode('unicode-escape') json_content = json_string_with_bytes_to_dict(content) private_key = load_pem_private_key(doc["private_key"].encode(), password=None, backend=default_backend()) assert isinstance(private_key, EllipticCurvePrivateKey), "Loading private key failed! - private_key is not instance of EllipticCurvePrivateKey" device_public_key = load_pem_public_key(json_content["device_public_key"].encode(), backend=default_backend()) assert isinstance(device_public_key, EllipticCurvePublicKey), "Loading public key failed! 
- private_key is not instance of EllipticCurvePublicKey" shared_key = private_key.exchange(ec.ECDH(), device_public_key) derived_key = HKDF( algorithm=hashes.SHA256(), length=32, salt=None, info=b'handshake data', backend=default_backend() ).derive(shared_key) key = key_to_hex(derived_key) # NOTE: retrieve key as `key_to_hex(key)` table.update(delete("public_key"), Query().device_id == device_id) table.update(delete("private_key"), Query().device_id == device_id) table.update(set("shared_key", key), Query().device_id == device_id) @user.command() @click.argument('user_id') @click.argument('device_id') @click.argument('policy') def send_column_keys(user_id, device_id, policy): table = get_tinydb_table(path, 'device_keys') doc = table.get(Query().device_id == device_id) if not doc: with click.Context(send_key_to_device) as ctx: click.echo(f"Keys for device {device_id} not present, please use: {ctx.command.name}") click.echo(get_attr_auth_keys.get_help(ctx)) return fernet_key = hex_to_fernet(doc["shared_key"]) keys = { "action:name": None, "device_data:added": None, "device_data:num_data": None, "device_data:tid": None } payload_keys = {} for k in keys: random_bytes = os.urandom(32) keys[k] = key_to_hex(random_bytes) # NOTE: retrieve key as `key_to_hex(key)` payload_keys[k] = fernet_key.encrypt(random_bytes).decode() # payload_keys["device_data:data"] = fernet_key.encrypt(get_aa_public_key().encode()).decode() abe_key_and_policy = json.dumps({ "public_key": get_aa_public_key(), "policy": policy }).encode() payload_keys["device_data:data"] = fernet_key.encrypt(abe_key_and_policy).decode() payload_keys["device:name"] = fernet_key.encrypt(hex_to_key(doc["device:name"])).decode() payload_keys["device:status"] = fernet_key.encrypt(hex_to_key(doc["device:status"])).decode() payload_keys["bi_key"] = fernet_key.encrypt(hex_to_key(doc["bi_key"])).decode() payload_keys["scene_key"] = fernet_key.encrypt(get_global_scene_key()).decode() doc = {**doc, **keys} table.upsert(doc, 
Query().device_id == device_id) client = _setup_client(user_id) payload = f'"{json.dumps(_create_payload(payload_keys, user_id))}"' ret = client.publish(f"u:{user_id}/d:{device_id}/", payload) click.echo(f"RC and MID = {ret}") @user.command() @click.argument('device_id') @click.argument('attr_list', nargs=-1) @click.option('--token', envvar='AA_ACCESS_TOKEN') def attr_auth_device_keygen(device_id, attr_list, token): if device_id not in " ".join(attr_list): click.echo(f"attr_list argument should contain device_id ({device_id})") return doc = search_tinydb_doc(path, 'aa_keys', where('public_key').exists()) if not doc: with click.Context(get_attr_auth_keys) as ctx: click.echo(f"Public key not present, please use: {ctx.command.name}") click.echo(get_attr_auth_keys.get_help(ctx)) return data = { "attr_list": " ".join(attr_list) } r = requests.post(AA_URL_DEVICE_KEYGEN, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS) content = r.content.decode('unicode-escape') json_content = json_string_with_bytes_to_dict(content) if not json_content["success"]: click.echo(json_content) return t = get_tinydb_table(path, "device_keys") device_data_doc = { "private_key": json_content["private_key"], "attr_list": attr_list, } t.update(set("device_data:data", device_data_doc), Query().device_id == device_id) @user.command() @click.option('--token', envvar='AA_ACCESS_TOKEN') def attr_auth_retrieve_private_keys(token): """Triggered by: ./cli.py -b "172.26.0.8" user attr-auth-retrieve-private-keys --token '7jagPr4edVdgvyyBNkjdaQ))'""" r = requests.post(AA_URL_SK_RETRIEVE, headers={"Authorization": token}, verify=VERIFY_CERTS) click.echo(r.content.decode('unicode-escape')) @user.command() @click.argument('device_id') @click.argument('abe_pk', type=click.Path(exists=True)) @click.argument('bi_key') @click.option('--token', envvar='AA_ACCESS_TOKEN') def setup_authorized_device(device_id, abe_pk, bi_key, token): r = requests.post(AA_URL_SK_RETRIEVE, headers={"Authorization": 
token}, verify=VERIFY_CERTS) content = json.loads(r.content.decode('unicode-escape')) abe_sk = next((key for key in content['private_keys'] if str(key["device_id"]) == device_id), None) if abe_sk is None: click.echo(f"Key for device: {device_id} is not present.") return del abe_sk["key_update"] del abe_sk["challenger_id"] del abe_sk["device_id"] abe_sk["private_key"] = abe_sk.pop("data") abe_sk["attr_list"] = abe_sk.pop("attributes") data = { "device_data:data": { **abe_sk, "public_key": open(abe_pk).read().strip() }, "device_id": device_id, "bi_key": bi_key, } insert_into_tinydb(path, 'device_keys', data) @user.command() @click.argument('user_id') @click.argument('device_id') @click.argument('device_name') @click.option('--owner/--no-owner', default=True) @click.option('--token', envvar='ACCESS_TOKEN') def get_device_data(user_id, device_id, device_name, owner, token): """ Queries server for data of :param device_id device and then verifies the received data using integrity information from device (received using MQTT Broker) and correctness hash attribute of each DB row. 
""" user_id = int(user_id) device_name_bi = blind_index(get_device_bi_key(device_id), device_name) data = {"device_name_bi": device_name_bi} r = requests.get(URL_GET_DEVICE_DATA, headers={"Authorization": token}, params=data, verify=VERIFY_CERTS) content = r.content.decode('unicode-escape') json_content = json_string_with_bytes_to_dict(content) if not json_content["success"]: click.echo(json_content["error"]) return if owner: _get_fake_tuple_data(user_id, int(device_id)) decrypted_fake_tuple_data = { "device_data": json.loads(decrypt_using_fernet_hex(get_shared_key_by_device_id(path, device_id), fake_tuple_data["device_data"]).decode())} fake_tuples, rows = _divide_fake_and_real_data(json_content["device_data"], device_id, decrypted_fake_tuple_data) # NOTE: ^ Not checking for ability of user to decrypt (having SK that satisfies Ciphertext) because owner should have keys setup # so that he can decrypt all data from his devices verify_integrity_data(generate_fake_tuples_in_range(decrypted_fake_tuple_data["device_data"]), fake_tuples) check_correctness_hash(rows, 'added', 'data', 'num_data', 'tid') result = [] for row in rows: try: result.append(unpad_row("data", row)) except Exception as e: click.echo(str(e)) click.echo(result) else: get_foreign_device_data(device_id, json_content) def get_foreign_device_data(device_id, data): doc = search_tinydb_doc(path, 'device_keys', Query().device_id == str(device_id)) if not doc: click.echo(f"Keys for device: {device_id} are missing. 
You are probably not authorized to use it.") decrypted = [] for row in data["device_data"]: try: decrypted.append(decrypt_using_abe_serialized_key(row["data"], doc["device_data:data"]["public_key"], doc["device_data:data"]["private_key"])) except: click.echo("Cannot decrypt row.") result = [] for val in decrypted: try: if val.endswith("0"): result.append(unpad_payload_attr(val)) except Exception as e: click.echo(str(e)) click.echo(result) def _handle_on_message(mqtt_client, userdata, msg, device_id, user_id): try: msg.payload = bytes_to_json(msg.payload) except JSONDecodeError: click.echo(f"Received invalid payload: {msg.payload.decode()}") return topic = msg.topic.split("/") t_sender, sender_id = topic[0].split(":") t_receiver, receiver_id = topic[1].split(":") if t_sender == "d" and t_receiver == "u": if is_number(sender_id) and int(sender_id) == device_id and is_number(receiver_id) and int(receiver_id) == user_id: mqtt_client.disconnect() global fake_tuple_data fake_tuple_data = msg.payload return click.echo(f"Received invalid topic: {msg.topic}") def _get_fake_tuple_data(user_id, device_id): payload_dict = {"request": "fake_tuple_info"} def on_message(mqtt_client, userdata, msg): _handle_on_message(mqtt_client, userdata, msg, device_id, user_id) client = _setup_client(str({user_id})) payload = f'"{json.dumps(_create_payload(payload_dict, user_id))}"' sub_topic = f"d:{device_id}/u:{user_id}/" client.subscribe(sub_topic) client.publish(f"u:{user_id}/d:{device_id}/", payload) client.on_message = on_message click.echo(f"Subscribed to {sub_topic}") click.echo("Waiting for response, CTRL-C to terminate...") client.loop_forever() def _divide_fake_and_real_data(rows, device_id, integrity_info): """ Split data into 2 lists based on 'fakeness' Decrypts each row and computes fake correctness hash, then tests (using bcrypt) whether `correctness_hash` of row is 'same' as computed fake correctness hash :param device_id :param rows: example: [{ 'added': 37123, 
'correctness_hash': '$2b$12$FSuBaNwezizWJcj47RxYJOpur2k49IJObfIPLDce5pKpRRZEASt6m', 'data': 'gAAAAABcUECMQMM0MjKknugGdI6YN81pLtmLUrcMsjHMBG87KpIJFWZF8n1DTVJX7VvnlVMMN4BNGdVROLeCD_I0XUs0IAK9AA==', 'device_id': 23, 'id': 1, 'num_data': -9199, 'tid': 3}, ...] """ db_col_names = ["device_data:added", "device_data:data", "device_data:tid", "device_data:num_data"] enc_keys = get_encryption_keys(device_id, db_col_names) col_types = {col: get_col_encryption_type(col, integrity_info) for col in db_col_names} key_type_pairs = {} for k, v in enc_keys.items(): if ":" in k: if k == "device_data:data": key_type_pairs[k.split(":")[1]] = [enc_keys[k][0], col_types[k], enc_keys[k][1]] # public, type, private else: key_type_pairs[k.split(":")[1]] = [enc_keys[k], col_types[k]] # private, type real, fake = [], [] for row in rows: modified = row modified.pop("id") modified.pop("device_id") modified.pop("tid_bi") row_correctness_hash = modified.pop("correctness_hash") decrypted = decrypt_row(modified, key_type_pairs) if is_fake(decrypted): decrypted["correctness_hash"] = row_correctness_hash fake.append(decrypted) else: decrypted["correctness_hash"] = row_correctness_hash real.append(decrypted) return fake, real def get_encryption_keys(device_id, db_keys): """ Retrieves encryption (decryption) keys corresponding to :param db_keys from TinyDB file :param device_id :param db_keys: list of TinyDB key names, e.g.: ["device_type:description", "action:name"] :return: dictionary of key, value pair of column name and encryption string, e.g.: {"action:name": "9dd1a57836a5...858372a8c0c42515", ...} """ doc = search_tinydb_doc(path, 'device_keys', Query().device_id == str(device_id)) result = {} for key in db_keys: if ":" in key: if key == "device_data:data": result[key] = [get_aa_public_key(), doc[key]["private_key"]] else: result[key] = doc[key] return result def get_col_encryption_type(col_name, integrity_info): """ Returns type based on whether the column is encrypted as number (OPE) or 
symmetrically (Fernet) or asymmetrically (ABE)- this is based on "type" attribute in TinyDB :param integrity_info: { 'device_data': { 'added': { 'seed': 12312412, 'lower_bound': 1, 'upper_bound': 1, 'type': "OPE" }}} :param col_name: e.g. "device_data:data" :return: """ table, col = col_name.split(":") return integrity_info[table][col]["type"] def decrypt_row(row, keys): """ :param row: example: { "added": 36976, "num_data": -9272, "data": "gAAAAABcTyUFZrhQRLzLvwep7j0Vm2UFjS2ylZ7bjB2YRueDpX15tobA0oOSEWBYZ4LaCKRa_h7WyKMacAAt-982srPPOR_1Cw==", "tid": 1 } :param keys: example: "added": ["26751017213ff85f189bedc34d302acfdf1649d5e1bac653a9709171ad37b155", "OPE"], "num_data": ["84964a963c097c550b41a085bbf1ad93ba5a1046aa5495d86d62f9623ab89cc6", "OPE"], "data": ["1fac0f8fa2083fe32c21d0PUBLIC_KEYf6959afb9f44623048e6875", "ABE", "rgesdrgPRIVATE_KEYedhder"] } """ result = {} for col, val in row.items(): if keys[col][1] == "Fernet": # if key is for fernet, create Fernet token result[col] = decrypt_using_fernet_hex(keys[col][0], val).decode() elif keys[col][1] == "OPE": # if key is for OPE, create OPE cipher result[col] = decrypt_using_ope_hex(keys[col][0], val) else: result[col] = decrypt_using_abe_serialized_key(val, keys[col][0], keys[col][2]) return result def is_fake(row_values): """ Check whether row is fake tuple based on "tid" attribute in row and computed hash from other attributes in row. 
:param row_values: dict with keys as column names and values as values from server DB (decrypted using `decrypt_row`) :return: bool """ return int(row_values["tid"]) >= 0 # Positive numbers are fake def verify_integrity_data(expected_tuples, present_rows): """ :param expected_tuples: list of dicts with keys as column names and values as values from server DB (generated fake tuples, that should be present in DB) :param present_rows: list of dicts with keys as column names and values as values from server DB (queried tuples) :return: False if any of the rows does not satisfy 'fakeness' check of if there less/more fake rows than there should be """ modified = copy.deepcopy(present_rows) for i, row in enumerate(modified): modified[i].pop("correctness_hash") if expected_tuples == modified: click.echo("Data Integrity satisfied.") else: click.echo("Data Integrity NOT satisfied.") def generate_fake_tuples_in_range(fake_tuple_info): """ Generates all fake tuples in <"lower_bound", "upper_bound"> range and verifies them againts :param fake_rows. 
:param fake_tuple_info: example: { "added": { "seed": 4574675, "lower_bound": 5, "upper_bound": 11, "is_numeric": True } :return: list of dicts (each dict contains single tuple with keys as column names and values) """ fake_tuple_col_values = {} fake_tuples = [] lb, ub = 0, 0 for col, val in fake_tuple_info.items(): lb = fake_tuple_info[col]["lower_bound"] ub = fake_tuple_info[col]["upper_bound"] if "seed" in fake_tuple_info[col]: fake_tuple_col_values[col] = [murmur_hash(str(i), fake_tuple_info[col]["seed"]) for i in range(lb, ub)] else: fake_tuple_col_values[col] = list(range(lb, ub)) for no, i in enumerate(range(lb, ub)): fake_tuples.append({"added": fake_tuple_col_values["added"][no], "num_data": fake_tuple_col_values["num_data"][no], "data": pad_payload_attr(str(fake_tuple_col_values["data"][no]), fake=True), "tid": str(fake_tuple_col_values["tid"][no])}) return fake_tuples def _setup_client(user_id): def on_publish(client, userdata, result): click.echo("Data published") client = paho.Client(user_id) client.on_publish = on_publish client.tls_set(ca_certs=os.path.join(os.path.dirname(__file__), "certs/server.crt"), certfile=None, keyfile=None, tls_version=ssl.PROTOCOL_TLSv1_2) client.tls_insecure_set(True) doc = search_tinydb_doc(path, 'credentials', where('broker_id').exists() & where('broker_password').exists()) client.username_pw_set(f"u:{doc['broker_id']}", doc['broker_password']) client.connect(MQTT_BROKER, MQTT_PORT, 30) return client @user.command() @click.argument('username') @click.option('--token', envvar='AA_ACCESS_TOKEN') def attr_auth_set_api_username(username, token): data = {"api_username": username} r = requests.post(AA_URL_SET_USERNAME, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS) click.echo(r.content.decode('unicode-escape')) @user.command() @click.option('--token', envvar='AA_ACCESS_TOKEN') def get_attr_auth_keys(token): r = requests.get(AA_URL_SETUP, headers={"Authorization": token}, verify=VERIFY_CERTS) content = 
json.loads(r.content.decode('unicode-escape')) click.echo(f"Saving keys to {path}") table = get_tinydb_table(path, 'aa_keys') doc = table.get(where('public_key').exists()) data = {"public_key": content["public_key"]} if doc: table.update(data) else: table.insert(data) @user.command() @click.argument('api_username') @click.argument('device_id') @click.argument('attr_list', nargs=-1) @click.option('--token', envvar='AA_ACCESS_TOKEN') def attr_auth_keygen(api_username, device_id, attr_list, token): doc = search_tinydb_doc(path, 'aa_keys', where('public_key').exists()) if not doc: with click.Context(get_attr_auth_keys) as ctx: click.echo(f"Public key not present, please use: {ctx.command.name}") click.echo(get_attr_auth_keys.get_help(ctx)) return data = { "attr_list": " ".join(attr_list), "api_username": api_username, "device_id": device_id } r = requests.post(AA_URL_KEYGEN, headers={"Authorization": token}, data=data, verify=VERIFY_CERTS) click.echo(r.content.decode('unicode-escape')) @user.command() @click.argument('message') @click.argument('policy_string') @click.option('--token', envvar='AA_ACCESS_TOKEN') def attr_auth_encrypt(message, policy_string, token): data = { "message": message, "policy_string": policy_string } r = requests.get(AA_URL_ENCRYPT, headers={"Authorization": token}, params=data, verify=VERIFY_CERTS) click.echo(r.content.decode('unicode-escape')) @user.command() @click.argument('owner_username') @click.argument('ciphertext') @click.option('--token', envvar='AA_ACCESS_TOKEN') def attr_auth_decrypt(owner_username, ciphertext, token): data = { "api_username": owner_username, "ciphertext": ciphertext } r = requests.get(AA_URL_DECRYPT, headers={"Authorization": token}, params=data, verify=VERIFY_CERTS) click.echo(r.content.decode('unicode-escape')) def is_global_bi_key_missing(command, message): doc = search_tinydb_doc(path, 'global', Query().bi_key.exists()) if not doc: with click.Context(command) as ctx: click.echo(f"{message}, please use: 
{ctx.command.name}") click.echo(command.get_help(ctx)) return True return False def get_global_bi_key(): table = get_tinydb_table(path, 'global') doc_global = table.all()[0] return hex_to_key(doc_global["bi_key"]) def get_global_scene_key(): table = get_tinydb_table(path, 'global') doc_global = table.all()[0] return hex_to_key(doc_global["scene_key"]) def get_device_bi_key(device_id): doc = search_tinydb_doc(path, 'device_keys', Query().device_id == str(device_id)) return hex_to_key(doc["bi_key"]) def get_aa_public_key(): doc = search_tinydb_doc(path, 'aa_keys', where('public_key').exists()) return doc["public_key"] def is_device_bi_key_missing(device_id, command, message): doc = search_tinydb_doc(path, 'device_keys', Query().device_id == str(device_id)) if doc is None or "bi_key" not in doc: with click.Context(command) as ctx: click.echo(f"{message}, please use: {ctx.command.name}") click.echo(command.get_help(ctx)) return True return False if __name__ == '__main__': # json_data = json.loads(create_device_type("test desc").content.decode("utf-8")) # print(json_data) # print(create_device(json_data["type_id"]).content.decode("utf-8")) # send_message() ...
#!/usr/bin/env python

import argparse
import os
import sqlite3


def main():
    """Extract mol blocks of selected mol ids from a vina_dock SQLite DB into an SDF file.

    Reads the ``mols`` table of the input DB, writes each ``mol_block`` to the
    output SDF and, unless ``-x`` is given, appends the ``docking_score`` as an
    SDF data field.
    """
    parser = argparse.ArgumentParser(description='Extract mol blocks of specified mol ids into SDF file and extract '
                                                 'their docking scores.')
    parser.add_argument('-i', '--input', metavar='input.db', required=True, type=str,
                        help='SQLite DB, which is output of vina_dock script.')
    parser.add_argument('-o', '--output', metavar='output.sdf', required=True, type=str,
                        help='output SDF file.')
    parser.add_argument('-d', '--ids', metavar='mol_ids', required=False, type=str, default=None,
                        help='comma separated list of mol ids in DB or a text file with mol ids on individual lines. '
                             'If omitted all records in DB will be saved to SDF.')
    parser.add_argument('-f', '--first_entry', action='store_true', default=False,
                        help='retrieve only the first entry of each molecule from the database.')
    parser.add_argument('-x', '--no_fields', action='store_true', default=False,
                        help='choose this option if you do not want to retrieve any further fields from a database.')

    args = parser.parse_args()

    # ids may come directly from the command line or from a file with one id per line
    if args.ids is None:
        ids = None
    elif os.path.isfile(args.ids):
        with open(args.ids) as f:
            ids = [line.strip() for line in f]
    else:
        ids = args.ids.split(',')

    conn = sqlite3.connect(args.input)
    try:
        cur = conn.cursor()

        if args.no_fields:
            sql = "SELECT mol_block FROM mols WHERE mol_block IS NOT NULL"
        else:
            sql = "SELECT mol_block, docking_score FROM mols WHERE mol_block IS NOT NULL"
        if ids is not None:
            # BUG FIX: the base query already has a WHERE clause, so the id filter
            # must be joined with AND — appending a second "WHERE id IN (...)"
            # (as before) produced invalid SQL.  Placeholders keep the query
            # parameterized against the user-supplied ids.
            placeholders = ','.join(['?'] * len(ids))
            sql += f" AND id IN ({placeholders})"
        if args.first_entry:
            # NOTE(review): relies on SQLite's bare-column behaviour with a
            # min()/max() aggregate to pick the first row per id — confirm this
            # is intentional before porting to another DB engine.
            sql += " GROUP BY id HAVING MIN(rowid) ORDER BY rowid"

        if ids is not None:
            res = cur.execute(sql, ids)
        else:
            res = cur.execute(sql)

        with open(args.output, 'wt') as f:
            for item in res:
                mol_block = item[0]
                f.write(mol_block)
                if len(item) == 2:
                    docking_score = item[1]
                    f.write('> <docking_score>\n')
                    f.write(f'{docking_score}\n\n')
                f.write('$$$$\n')
    finally:
        # ensure the DB handle is released even if writing the SDF fails
        conn.close()


if __name__ == '__main__':
    main()
#!/usr/bin/env python

import argparse
import os
import sqlite3


def main():
    """Dump mol blocks (and optionally docking scores) from a vina_dock SQLite DB to SDF.

    Selects rows of the ``mols`` table whose ``mol_block`` is not NULL,
    optionally restricted to the ids given via ``-d``, and writes them as an
    SDF file with ``docking_score`` emitted as a data field unless ``-x`` is set.
    """
    parser = argparse.ArgumentParser(description='Extract mol blocks of specified mol ids into SDF file and extract '
                                                 'their docking scores.')
    parser.add_argument('-i', '--input', metavar='input.db', required=True, type=str,
                        help='SQLite DB, which is output of vina_dock script.')
    parser.add_argument('-o', '--output', metavar='output.sdf', required=True, type=str,
                        help='output SDF file.')
    parser.add_argument('-d', '--ids', metavar='mol_ids', required=False, type=str, default=None,
                        help='comma separated list of mol ids in DB or a text file with mol ids on individual lines. '
                             'If omitted all records in DB will be saved to SDF.')
    parser.add_argument('-f', '--first_entry', action='store_true', default=False,
                        help='retrieve only the first entry of each molecule from the database.')
    parser.add_argument('-x', '--no_fields', action='store_true', default=False,
                        help='choose this option if you do not want to retrieve any further fields from a database.')

    args = parser.parse_args()

    # -d accepts either a comma-separated list or a path to a file of ids
    if args.ids is None:
        ids = None
    elif os.path.isfile(args.ids):
        with open(args.ids) as f:
            ids = [line.strip() for line in f]
    else:
        ids = args.ids.split(',')

    conn = sqlite3.connect(args.input)
    try:
        cur = conn.cursor()

        if args.no_fields:
            sql = "SELECT mol_block FROM mols WHERE mol_block IS NOT NULL"
        else:
            sql = "SELECT mol_block, docking_score FROM mols WHERE mol_block IS NOT NULL"
        if ids is not None:
            # BUG FIX: a WHERE clause is already present above, so the id
            # restriction has to be appended with AND; the previous code added a
            # second "WHERE id IN (...)" which is invalid SQL and made every
            # id-filtered invocation fail.
            sql += " AND id IN ({})".format(','.join(['?'] * len(ids)))
        if args.first_entry:
            # NOTE(review): depends on SQLite's bare-column semantics in
            # min()/max() aggregate queries to return the earliest row per id —
            # verify before reusing this query elsewhere.
            sql += " GROUP BY id HAVING MIN(rowid) ORDER BY rowid"

        res = cur.execute(sql, ids) if ids is not None else cur.execute(sql)

        with open(args.output, 'wt') as f:
            for item in res:
                f.write(item[0])  # the mol block itself
                if len(item) == 2:
                    f.write('> <docking_score>\n')
                    f.write(f'{item[1]}\n\n')
                f.write('$$$$\n')
    finally:
        conn.close()  # release the DB handle even on write errors


if __name__ == '__main__':
    main()
# coding: utf-8 """Plotting library.""" from copy import deepcopy from io import BytesIO from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np from .basic import Booster, _log_warning from .compat import GRAPHVIZ_INSTALLED, MATPLOTLIB_INSTALLED from .sklearn import LGBMModel def _check_not_tuple_of_2_elements(obj: Any, obj_name: str = 'obj') -> None: """Check object is not tuple or does not have 2 elements.""" if not isinstance(obj, tuple) or len(obj) != 2: raise TypeError(f"{obj_name} must be a tuple of 2 elements.") def _float2str(value: float, precision: Optional[int] = None) -> str: return (f"{value:.{precision}f}" if precision is not None and not isinstance(value, str) else str(value)) def plot_importance( booster: Union[Booster, LGBMModel], ax=None, height: float = 0.2, xlim: Optional[Tuple[float, float]] = None, ylim: Optional[Tuple[float, float]] = None, title: Optional[str] = 'Feature importance', xlabel: Optional[str] = 'Feature importance', ylabel: Optional[str] = 'Features', importance_type: str = 'auto', max_num_features: Optional[int] = None, ignore_zero: bool = True, figsize: Optional[Tuple[float, float]] = None, dpi: Optional[int] = None, grid: bool = True, precision: Optional[int] = 3, **kwargs: Any ) -> Any: """Plot model's feature importances. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance which feature importance should be plotted. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. height : float, optional (default=0.2) Bar height, passed to ``ax.barh()``. xlim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.xlim()``. ylim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.ylim()``. title : str or None, optional (default="Feature importance") Axes title. If None, title is disabled. 
xlabel : str or None, optional (default="Feature importance") X-axis title label. If None, title is disabled. @importance_type@ placeholder can be used, and it will be replaced with the value of ``importance_type`` parameter. ylabel : str or None, optional (default="Features") Y-axis title label. If None, title is disabled. importance_type : str, optional (default="auto") How the importance is calculated. If "auto", if ``booster`` parameter is LGBMModel, ``booster.importance_type`` attribute is used; "split" otherwise. If "split", result contains numbers of times the feature is used in a model. If "gain", result contains total gains of splits which use the feature. max_num_features : int or None, optional (default=None) Max number of top features displayed on plot. If None or <1, all features will be displayed. ignore_zero : bool, optional (default=True) Whether to ignore features with zero importance. figsize : tuple of 2 elements or None, optional (default=None) Figure size. dpi : int or None, optional (default=None) Resolution of the figure. grid : bool, optional (default=True) Whether to add a grid for axes. precision : int or None, optional (default=3) Used to restrict the display of floating point values to a certain precision. **kwargs Other parameters passed to ``ax.barh()``. Returns ------- ax : matplotlib.axes.Axes The plot with model's feature importances. 
""" if MATPLOTLIB_INSTALLED: import matplotlib.pyplot as plt else: raise ImportError('You must install matplotlib and restart your session to plot importance.') if isinstance(booster, LGBMModel): if importance_type == "auto": importance_type = booster.importance_type booster = booster.booster_ elif isinstance(booster, Booster): if importance_type == "auto": importance_type = "split" else: raise TypeError('booster must be Booster or LGBMModel.') importance = booster.feature_importance(importance_type=importance_type) feature_name = booster.feature_name() if not len(importance): raise ValueError("Booster's feature_importance is empty.") tuples = sorted(zip(feature_name, importance), key=lambda x: x[1]) if ignore_zero: tuples = [x for x in tuples if x[1] > 0] if max_num_features is not None and max_num_features > 0: tuples = tuples[-max_num_features:] labels, values = zip(*tuples) if ax is None: if figsize is not None: _check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) ylocs = np.arange(len(values)) ax.barh(ylocs, values, align='center', height=height, **kwargs) for x, y in zip(values, ylocs): ax.text(x + 1, y, _float2str(x, precision) if importance_type == 'gain' else x, va='center') ax.set_yticks(ylocs) ax.set_yticklabels(labels) if xlim is not None: _check_not_tuple_of_2_elements(xlim, 'xlim') else: xlim = (0, max(values) * 1.1) ax.set_xlim(xlim) if ylim is not None: _check_not_tuple_of_2_elements(ylim, 'ylim') else: ylim = (-1, len(values)) ax.set_ylim(ylim) if title is not None: ax.set_title(title) if xlabel is not None: xlabel = xlabel.replace('@importance_type@', importance_type) ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) ax.grid(grid) return ax def plot_split_value_histogram( booster: Union[Booster, LGBMModel], feature: Union[int, str], bins: Union[int, str, None] = None, ax=None, width_coef: float = 0.8, xlim: Optional[Tuple[float, float]] = None, ylim: Optional[Tuple[float, float]] 
= None, title: Optional[str] = 'Split value histogram for feature with @index/name@ @feature@', xlabel: Optional[str] = 'Feature split value', ylabel: Optional[str] = 'Count', figsize: Optional[Tuple[float, float]] = None, dpi: Optional[int] = None, grid: bool = True, **kwargs: Any ) -> Any: """Plot split value histogram for the specified feature of the model. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance of which feature split value histogram should be plotted. feature : int or str The feature name or index the histogram is plotted for. If int, interpreted as index. If str, interpreted as name. bins : int, str or None, optional (default=None) The maximum number of bins. If None, the number of bins equals number of unique split values. If str, it should be one from the list of the supported values by ``numpy.histogram()`` function. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. width_coef : float, optional (default=0.8) Coefficient for histogram bar width. xlim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.xlim()``. ylim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.ylim()``. title : str or None, optional (default="Split value histogram for feature with @index/name@ @feature@") Axes title. If None, title is disabled. @feature@ placeholder can be used, and it will be replaced with the value of ``feature`` parameter. @index/name@ placeholder can be used, and it will be replaced with ``index`` word in case of ``int`` type ``feature`` parameter or ``name`` word in case of ``str`` type ``feature`` parameter. xlabel : str or None, optional (default="Feature split value") X-axis title label. If None, title is disabled. ylabel : str or None, optional (default="Count") Y-axis title label. If None, title is disabled. figsize : tuple of 2 elements or None, optional (default=None) Figure size. 
dpi : int or None, optional (default=None) Resolution of the figure. grid : bool, optional (default=True) Whether to add a grid for axes. **kwargs Other parameters passed to ``ax.bar()``. Returns ------- ax : matplotlib.axes.Axes The plot with specified model's feature split value histogram. """ if MATPLOTLIB_INSTALLED: import matplotlib.pyplot as plt from matplotlib.ticker import MaxNLocator else: raise ImportError('You must install matplotlib and restart your session to plot split value histogram.') if isinstance(booster, LGBMModel): booster = booster.booster_ elif not isinstance(booster, Booster): raise TypeError('booster must be Booster or LGBMModel.') hist, split_bins = booster.get_split_value_histogram(feature=feature, bins=bins, xgboost_style=False) if np.count_nonzero(hist) == 0: raise ValueError('Cannot plot split value histogram, ' f'because feature {feature} was not used in splitting') width = width_coef * (split_bins[1] - split_bins[0]) centred = (split_bins[:-1] + split_bins[1:]) / 2 if ax is None: if figsize is not None: _check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) ax.bar(centred, hist, align='center', width=width, **kwargs) if xlim is not None: _check_not_tuple_of_2_elements(xlim, 'xlim') else: range_result = split_bins[-1] - split_bins[0] xlim = (split_bins[0] - range_result * 0.2, split_bins[-1] + range_result * 0.2) ax.set_xlim(xlim) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) if ylim is not None: _check_not_tuple_of_2_elements(ylim, 'ylim') else: ylim = (0, max(hist) * 1.1) ax.set_ylim(ylim) if title is not None: title = title.replace('@feature@', str(feature)) title = title.replace('@index/name@', ('name' if isinstance(feature, str) else 'index')) ax.set_title(title) if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) ax.grid(grid) return ax def plot_metric( booster: Union[Dict, LGBMModel], metric: Optional[str] = None, dataset_names: 
Optional[List[str]] = None, ax=None, xlim: Optional[Tuple[float, float]] = None, ylim: Optional[Tuple[float, float]] = None, title: Optional[str] = 'Metric during training', xlabel: Optional[str] = 'Iterations', ylabel: Optional[str] = '@metric@', figsize: Optional[Tuple[float, float]] = None, dpi: Optional[int] = None, grid: bool = True ) -> Any: """Plot one metric during training. Parameters ---------- booster : dict or LGBMModel Dictionary returned from ``lightgbm.train()`` or LGBMModel instance. metric : str or None, optional (default=None) The metric name to plot. Only one metric supported because different metrics have various scales. If None, first metric picked from dictionary (according to hashcode). dataset_names : list of str, or None, optional (default=None) List of the dataset names which are used to calculate metric to plot. If None, all datasets are used. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. xlim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.xlim()``. ylim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.ylim()``. title : str or None, optional (default="Metric during training") Axes title. If None, title is disabled. xlabel : str or None, optional (default="Iterations") X-axis title label. If None, title is disabled. ylabel : str or None, optional (default="@metric@") Y-axis title label. If 'auto', metric name is used. If None, title is disabled. @metric@ placeholder can be used, and it will be replaced with metric name. figsize : tuple of 2 elements or None, optional (default=None) Figure size. dpi : int or None, optional (default=None) Resolution of the figure. grid : bool, optional (default=True) Whether to add a grid for axes. Returns ------- ax : matplotlib.axes.Axes The plot with metric's history over the training. 
""" if MATPLOTLIB_INSTALLED: import matplotlib.pyplot as plt else: raise ImportError('You must install matplotlib and restart your session to plot metric.') if isinstance(booster, LGBMModel): eval_results = deepcopy(booster.evals_result_) elif isinstance(booster, dict): eval_results = deepcopy(booster) elif isinstance(booster, Booster): raise TypeError("booster must be dict or LGBMModel. To use plot_metric with Booster type, first record the metrics using record_evaluation callback then pass that to plot_metric as argument `booster`") else: raise TypeError('booster must be dict or LGBMModel.') num_data = len(eval_results) if not num_data: raise ValueError('eval results cannot be empty.') if ax is None: if figsize is not None: _check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) if dataset_names is None: dataset_names_iter = iter(eval_results.keys()) elif not isinstance(dataset_names, (list, tuple, set)) or not dataset_names: raise ValueError('dataset_names should be iterable and cannot be empty') else: dataset_names_iter = iter(dataset_names) name = next(dataset_names_iter) # take one as sample metrics_for_one = eval_results[name] num_metric = len(metrics_for_one) if metric is None: if num_metric > 1: _log_warning("More than one metric available, picking one to plot.") metric, results = metrics_for_one.popitem() else: if metric not in metrics_for_one: raise KeyError('No given metric in eval results.') results = metrics_for_one[metric] num_iteration = len(results) max_result = max(results) min_result = min(results) x_ = range(num_iteration) ax.plot(x_, results, label=name) for name in dataset_names_iter: metrics_for_one = eval_results[name] results = metrics_for_one[metric] max_result = max(max(results), max_result) min_result = min(min(results), min_result) ax.plot(x_, results, label=name) ax.legend(loc='best') if xlim is not None: _check_not_tuple_of_2_elements(xlim, 'xlim') else: xlim = (0, num_iteration) 
ax.set_xlim(xlim) if ylim is not None: _check_not_tuple_of_2_elements(ylim, 'ylim') else: range_result = max_result - min_result ylim = (min_result - range_result * 0.2, max_result + range_result * 0.2) ax.set_ylim(ylim) if title is not None: ax.set_title(title) if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ylabel = ylabel.replace('@metric@', metric) ax.set_ylabel(ylabel) ax.grid(grid) return ax def _to_graphviz( tree_info: Dict[str, Any], show_info: List[str], feature_names: Union[List[str], None], precision: Optional[int] = 3, orientation: str = 'horizontal', constraints: Optional[List[int]] = None, **kwargs: Any ) -> Any: """Convert specified tree to graphviz instance. See: - https://graphviz.readthedocs.io/en/stable/api.html#digraph """ if GRAPHVIZ_INSTALLED: from graphviz import Digraph else: raise ImportError('You must install graphviz and restart your session to plot tree.') def add(root, total_count, parent=None, decision=None): """Recursively add node or edge.""" if 'split_index' in root: # non-leaf l_dec = 'yes' r_dec = 'no' if root['decision_type'] == '<=': lte_symbol = "&#8804;" operator = lte_symbol elif root['decision_type'] == '==': operator = "=" else: raise ValueError('Invalid decision type in tree model.') name = f"split{root["split_index"]}" if feature_names is not None: label = f"<B>{feature_names[root["split_feature"]]}</B> {operator}" else: label = f"feature <B>{root["split_feature"]}</B> {operator} " label += f"<B>{_float2str(root["threshold"], precision)}</B>" for info in ['split_gain', 'internal_value', 'internal_weight', "internal_count", "data_percentage"]: if info in show_info: output = info.split('_')[-1] if info in {'split_gain', 'internal_value', 'internal_weight'}: label += f"<br/>{_float2str(root[info], precision)} {output}" elif info == 'internal_count': label += f"<br/>{output}: {root[info]}" elif info == "data_percentage": label += f"<br/>{_float2str(root["internal_count"] / total_count * 100, 2)}% of data" 
fillcolor = "white" style = "" if constraints: if constraints[root['split_feature']] == 1: fillcolor = "#ddffdd" # light green if constraints[root['split_feature']] == -1: fillcolor = "#ffdddd" # light red style = "filled" label = f"<{label}>" graph.node(name, label=label, shape="rectangle", style=style, fillcolor=fillcolor) add(root['left_child'], total_count, name, l_dec) add(root['right_child'], total_count, name, r_dec) else: # leaf name = f"leaf{root["leaf_index"]}" label = f"leaf {root["leaf_index"]}: " label += f"<B>{_float2str(root["leaf_value"], precision)}</B>" if 'leaf_weight' in show_info: label += f"<br/>{_float2str(root["leaf_weight"], precision)} weight" if 'leaf_count' in show_info: label += f"<br/>count: {root["leaf_count"]}" if "data_percentage" in show_info: label += f"<br/>{_float2str(root["leaf_count"] / total_count * 100, 2)}% of data" label = f"<{label}>" graph.node(name, label=label) if parent is not None: graph.edge(parent, name, decision) graph = Digraph(**kwargs) rankdir = "LR" if orientation == "horizontal" else "TB" graph.attr("graph", nodesep="0.05", ranksep="0.3", rankdir=rankdir) if "internal_count" in tree_info['tree_structure']: add(tree_info['tree_structure'], tree_info['tree_structure']["internal_count"]) else: raise Exception("Cannot plot trees with no split") if constraints: # "#ddffdd" is light green, "#ffdddd" is light red legend = """< <TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" CELLPADDING="4"> <TR> <TD COLSPAN="2"><B>Monotone constraints</B></TD> </TR> <TR> <TD>Increasing</TD> <TD BGCOLOR="#ddffdd"></TD> </TR> <TR> <TD>Decreasing</TD> <TD BGCOLOR="#ffdddd"></TD> </TR> </TABLE> >""" graph.node("legend", label=legend, shape="rectangle", color="white") return graph def create_tree_digraph( booster: Union[Booster, LGBMModel], tree_index: int = 0, show_info: Optional[List[str]] = None, precision: Optional[int] = 3, orientation: str = 'horizontal', **kwargs: Any ) -> Any: """Create a digraph representation of specified 
tree. Each node in the graph represents a node in the tree. Non-leaf nodes have labels like ``Column_10 <= 875.9``, which means "this node splits on the feature named "Column_10", with threshold 875.9". Leaf nodes have labels like ``leaf 2: 0.422``, which means "this node is a leaf node, and the predicted value for records that fall into this node is 0.422". The number (``2``) is an internal unique identifier and doesn't have any special meaning. .. note:: For more information please visit https://graphviz.readthedocs.io/en/stable/api.html#digraph. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance to be converted. tree_index : int, optional (default=0) The index of a target tree to convert. show_info : list of str, or None, optional (default=None) What information should be shown in nodes. - ``'split_gain'`` : gain from adding this split to the model - ``'internal_value'`` : raw predicted value that would be produced by this node if it was a leaf node - ``'internal_count'`` : number of records from the training data that fall into this non-leaf node - ``'internal_weight'`` : total weight of all nodes that fall into this non-leaf node - ``'leaf_count'`` : number of records from the training data that fall into this leaf node - ``'leaf_weight'`` : total weight (sum of Hessian) of all observations that fall into this leaf node - ``'data_percentage'`` : percentage of training data that fall into this node precision : int or None, optional (default=3) Used to restrict the display of floating point values to a certain precision. orientation : str, optional (default='horizontal') Orientation of the tree. Can be 'horizontal' or 'vertical'. **kwargs Other parameters passed to ``Digraph`` constructor. Check https://graphviz.readthedocs.io/en/stable/api.html#digraph for the full list of supported parameters. Returns ------- graph : graphviz.Digraph The digraph representation of specified tree. 
""" if isinstance(booster, LGBMModel): booster = booster.booster_ elif not isinstance(booster, Booster): raise TypeError('booster must be Booster or LGBMModel.') model = booster.dump_model() tree_infos = model['tree_info'] if 'feature_names' in model: feature_names = model['feature_names'] else: feature_names = None monotone_constraints = model.get('monotone_constraints', None) if tree_index < len(tree_infos): tree_info = tree_infos[tree_index] else: raise IndexError('tree_index is out of range.') if show_info is None: show_info = [] graph = _to_graphviz(tree_info, show_info, feature_names, precision, orientation, monotone_constraints, **kwargs) return graph def plot_tree( booster: Union[Booster, LGBMModel], ax=None, tree_index: int = 0, figsize: Optional[Tuple[float, float]] = None, dpi: Optional[int] = None, show_info: Optional[List[str]] = None, precision: Optional[int] = 3, orientation: str = 'horizontal', **kwargs: Any ) -> Any: """Plot specified tree. Each node in the graph represents a node in the tree. Non-leaf nodes have labels like ``Column_10 <= 875.9``, which means "this node splits on the feature named "Column_10", with threshold 875.9". Leaf nodes have labels like ``leaf 2: 0.422``, which means "this node is a leaf node, and the predicted value for records that fall into this node is 0.422". The number (``2``) is an internal unique identifier and doesn't have any special meaning. .. note:: It is preferable to use ``create_tree_digraph()`` because of its lossless quality and returned objects can be also rendered and displayed directly inside a Jupyter notebook. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance to be plotted. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. tree_index : int, optional (default=0) The index of a target tree to plot. figsize : tuple of 2 elements or None, optional (default=None) Figure size. 
dpi : int or None, optional (default=None) Resolution of the figure. show_info : list of str, or None, optional (default=None) What information should be shown in nodes. - ``'split_gain'`` : gain from adding this split to the model - ``'internal_value'`` : raw predicted value that would be produced by this node if it was a leaf node - ``'internal_count'`` : number of records from the training data that fall into this non-leaf node - ``'internal_weight'`` : total weight of all nodes that fall into this non-leaf node - ``'leaf_count'`` : number of records from the training data that fall into this leaf node - ``'leaf_weight'`` : total weight (sum of Hessian) of all observations that fall into this leaf node - ``'data_percentage'`` : percentage of training data that fall into this node precision : int or None, optional (default=3) Used to restrict the display of floating point values to a certain precision. orientation : str, optional (default='horizontal') Orientation of the tree. Can be 'horizontal' or 'vertical'. **kwargs Other parameters passed to ``Digraph`` constructor. Check https://graphviz.readthedocs.io/en/stable/api.html#digraph for the full list of supported parameters. Returns ------- ax : matplotlib.axes.Axes The plot with single tree. """ if MATPLOTLIB_INSTALLED: import matplotlib.image as image import matplotlib.pyplot as plt else: raise ImportError('You must install matplotlib and restart your session to plot tree.') if ax is None: if figsize is not None: _check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) graph = create_tree_digraph(booster=booster, tree_index=tree_index, show_info=show_info, precision=precision, orientation=orientation, **kwargs) s = BytesIO() s.write(graph.pipe(format='png')) s.seek(0) img = image.imread(s) ax.imshow(img) ax.axis('off') return ax
# coding: utf-8 """Plotting library.""" from copy import deepcopy from io import BytesIO from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np from .basic import Booster, _log_warning from .compat import GRAPHVIZ_INSTALLED, MATPLOTLIB_INSTALLED from .sklearn import LGBMModel def _check_not_tuple_of_2_elements(obj: Any, obj_name: str = 'obj') -> None: """Check object is not tuple or does not have 2 elements.""" if not isinstance(obj, tuple) or len(obj) != 2: raise TypeError(f"{obj_name} must be a tuple of 2 elements.") def _float2str(value: float, precision: Optional[int] = None) -> str: return (f"{value:.{precision}f}" if precision is not None and not isinstance(value, str) else str(value)) def plot_importance( booster: Union[Booster, LGBMModel], ax=None, height: float = 0.2, xlim: Optional[Tuple[float, float]] = None, ylim: Optional[Tuple[float, float]] = None, title: Optional[str] = 'Feature importance', xlabel: Optional[str] = 'Feature importance', ylabel: Optional[str] = 'Features', importance_type: str = 'auto', max_num_features: Optional[int] = None, ignore_zero: bool = True, figsize: Optional[Tuple[float, float]] = None, dpi: Optional[int] = None, grid: bool = True, precision: Optional[int] = 3, **kwargs: Any ) -> Any: """Plot model's feature importances. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance which feature importance should be plotted. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. height : float, optional (default=0.2) Bar height, passed to ``ax.barh()``. xlim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.xlim()``. ylim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.ylim()``. title : str or None, optional (default="Feature importance") Axes title. If None, title is disabled. 
xlabel : str or None, optional (default="Feature importance") X-axis title label. If None, title is disabled. @importance_type@ placeholder can be used, and it will be replaced with the value of ``importance_type`` parameter. ylabel : str or None, optional (default="Features") Y-axis title label. If None, title is disabled. importance_type : str, optional (default="auto") How the importance is calculated. If "auto", if ``booster`` parameter is LGBMModel, ``booster.importance_type`` attribute is used; "split" otherwise. If "split", result contains numbers of times the feature is used in a model. If "gain", result contains total gains of splits which use the feature. max_num_features : int or None, optional (default=None) Max number of top features displayed on plot. If None or <1, all features will be displayed. ignore_zero : bool, optional (default=True) Whether to ignore features with zero importance. figsize : tuple of 2 elements or None, optional (default=None) Figure size. dpi : int or None, optional (default=None) Resolution of the figure. grid : bool, optional (default=True) Whether to add a grid for axes. precision : int or None, optional (default=3) Used to restrict the display of floating point values to a certain precision. **kwargs Other parameters passed to ``ax.barh()``. Returns ------- ax : matplotlib.axes.Axes The plot with model's feature importances. 
""" if MATPLOTLIB_INSTALLED: import matplotlib.pyplot as plt else: raise ImportError('You must install matplotlib and restart your session to plot importance.') if isinstance(booster, LGBMModel): if importance_type == "auto": importance_type = booster.importance_type booster = booster.booster_ elif isinstance(booster, Booster): if importance_type == "auto": importance_type = "split" else: raise TypeError('booster must be Booster or LGBMModel.') importance = booster.feature_importance(importance_type=importance_type) feature_name = booster.feature_name() if not len(importance): raise ValueError("Booster's feature_importance is empty.") tuples = sorted(zip(feature_name, importance), key=lambda x: x[1]) if ignore_zero: tuples = [x for x in tuples if x[1] > 0] if max_num_features is not None and max_num_features > 0: tuples = tuples[-max_num_features:] labels, values = zip(*tuples) if ax is None: if figsize is not None: _check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) ylocs = np.arange(len(values)) ax.barh(ylocs, values, align='center', height=height, **kwargs) for x, y in zip(values, ylocs): ax.text(x + 1, y, _float2str(x, precision) if importance_type == 'gain' else x, va='center') ax.set_yticks(ylocs) ax.set_yticklabels(labels) if xlim is not None: _check_not_tuple_of_2_elements(xlim, 'xlim') else: xlim = (0, max(values) * 1.1) ax.set_xlim(xlim) if ylim is not None: _check_not_tuple_of_2_elements(ylim, 'ylim') else: ylim = (-1, len(values)) ax.set_ylim(ylim) if title is not None: ax.set_title(title) if xlabel is not None: xlabel = xlabel.replace('@importance_type@', importance_type) ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) ax.grid(grid) return ax def plot_split_value_histogram( booster: Union[Booster, LGBMModel], feature: Union[int, str], bins: Union[int, str, None] = None, ax=None, width_coef: float = 0.8, xlim: Optional[Tuple[float, float]] = None, ylim: Optional[Tuple[float, float]] 
= None, title: Optional[str] = 'Split value histogram for feature with @index/name@ @feature@', xlabel: Optional[str] = 'Feature split value', ylabel: Optional[str] = 'Count', figsize: Optional[Tuple[float, float]] = None, dpi: Optional[int] = None, grid: bool = True, **kwargs: Any ) -> Any: """Plot split value histogram for the specified feature of the model. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance of which feature split value histogram should be plotted. feature : int or str The feature name or index the histogram is plotted for. If int, interpreted as index. If str, interpreted as name. bins : int, str or None, optional (default=None) The maximum number of bins. If None, the number of bins equals number of unique split values. If str, it should be one from the list of the supported values by ``numpy.histogram()`` function. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. width_coef : float, optional (default=0.8) Coefficient for histogram bar width. xlim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.xlim()``. ylim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.ylim()``. title : str or None, optional (default="Split value histogram for feature with @index/name@ @feature@") Axes title. If None, title is disabled. @feature@ placeholder can be used, and it will be replaced with the value of ``feature`` parameter. @index/name@ placeholder can be used, and it will be replaced with ``index`` word in case of ``int`` type ``feature`` parameter or ``name`` word in case of ``str`` type ``feature`` parameter. xlabel : str or None, optional (default="Feature split value") X-axis title label. If None, title is disabled. ylabel : str or None, optional (default="Count") Y-axis title label. If None, title is disabled. figsize : tuple of 2 elements or None, optional (default=None) Figure size. 
dpi : int or None, optional (default=None) Resolution of the figure. grid : bool, optional (default=True) Whether to add a grid for axes. **kwargs Other parameters passed to ``ax.bar()``. Returns ------- ax : matplotlib.axes.Axes The plot with specified model's feature split value histogram. """ if MATPLOTLIB_INSTALLED: import matplotlib.pyplot as plt from matplotlib.ticker import MaxNLocator else: raise ImportError('You must install matplotlib and restart your session to plot split value histogram.') if isinstance(booster, LGBMModel): booster = booster.booster_ elif not isinstance(booster, Booster): raise TypeError('booster must be Booster or LGBMModel.') hist, split_bins = booster.get_split_value_histogram(feature=feature, bins=bins, xgboost_style=False) if np.count_nonzero(hist) == 0: raise ValueError('Cannot plot split value histogram, ' f'because feature {feature} was not used in splitting') width = width_coef * (split_bins[1] - split_bins[0]) centred = (split_bins[:-1] + split_bins[1:]) / 2 if ax is None: if figsize is not None: _check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) ax.bar(centred, hist, align='center', width=width, **kwargs) if xlim is not None: _check_not_tuple_of_2_elements(xlim, 'xlim') else: range_result = split_bins[-1] - split_bins[0] xlim = (split_bins[0] - range_result * 0.2, split_bins[-1] + range_result * 0.2) ax.set_xlim(xlim) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) if ylim is not None: _check_not_tuple_of_2_elements(ylim, 'ylim') else: ylim = (0, max(hist) * 1.1) ax.set_ylim(ylim) if title is not None: title = title.replace('@feature@', str(feature)) title = title.replace('@index/name@', ('name' if isinstance(feature, str) else 'index')) ax.set_title(title) if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) ax.grid(grid) return ax def plot_metric( booster: Union[Dict, LGBMModel], metric: Optional[str] = None, dataset_names: 
Optional[List[str]] = None, ax=None, xlim: Optional[Tuple[float, float]] = None, ylim: Optional[Tuple[float, float]] = None, title: Optional[str] = 'Metric during training', xlabel: Optional[str] = 'Iterations', ylabel: Optional[str] = '@metric@', figsize: Optional[Tuple[float, float]] = None, dpi: Optional[int] = None, grid: bool = True ) -> Any: """Plot one metric during training. Parameters ---------- booster : dict or LGBMModel Dictionary returned from ``lightgbm.train()`` or LGBMModel instance. metric : str or None, optional (default=None) The metric name to plot. Only one metric supported because different metrics have various scales. If None, first metric picked from dictionary (according to hashcode). dataset_names : list of str, or None, optional (default=None) List of the dataset names which are used to calculate metric to plot. If None, all datasets are used. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. xlim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.xlim()``. ylim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.ylim()``. title : str or None, optional (default="Metric during training") Axes title. If None, title is disabled. xlabel : str or None, optional (default="Iterations") X-axis title label. If None, title is disabled. ylabel : str or None, optional (default="@metric@") Y-axis title label. If 'auto', metric name is used. If None, title is disabled. @metric@ placeholder can be used, and it will be replaced with metric name. figsize : tuple of 2 elements or None, optional (default=None) Figure size. dpi : int or None, optional (default=None) Resolution of the figure. grid : bool, optional (default=True) Whether to add a grid for axes. Returns ------- ax : matplotlib.axes.Axes The plot with metric's history over the training. 
""" if MATPLOTLIB_INSTALLED: import matplotlib.pyplot as plt else: raise ImportError('You must install matplotlib and restart your session to plot metric.') if isinstance(booster, LGBMModel): eval_results = deepcopy(booster.evals_result_) elif isinstance(booster, dict): eval_results = deepcopy(booster) elif isinstance(booster, Booster): raise TypeError("booster must be dict or LGBMModel. To use plot_metric with Booster type, first record the metrics using record_evaluation callback then pass that to plot_metric as argument `booster`") else: raise TypeError('booster must be dict or LGBMModel.') num_data = len(eval_results) if not num_data: raise ValueError('eval results cannot be empty.') if ax is None: if figsize is not None: _check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) if dataset_names is None: dataset_names_iter = iter(eval_results.keys()) elif not isinstance(dataset_names, (list, tuple, set)) or not dataset_names: raise ValueError('dataset_names should be iterable and cannot be empty') else: dataset_names_iter = iter(dataset_names) name = next(dataset_names_iter) # take one as sample metrics_for_one = eval_results[name] num_metric = len(metrics_for_one) if metric is None: if num_metric > 1: _log_warning("More than one metric available, picking one to plot.") metric, results = metrics_for_one.popitem() else: if metric not in metrics_for_one: raise KeyError('No given metric in eval results.') results = metrics_for_one[metric] num_iteration = len(results) max_result = max(results) min_result = min(results) x_ = range(num_iteration) ax.plot(x_, results, label=name) for name in dataset_names_iter: metrics_for_one = eval_results[name] results = metrics_for_one[metric] max_result = max(max(results), max_result) min_result = min(min(results), min_result) ax.plot(x_, results, label=name) ax.legend(loc='best') if xlim is not None: _check_not_tuple_of_2_elements(xlim, 'xlim') else: xlim = (0, num_iteration) 
ax.set_xlim(xlim) if ylim is not None: _check_not_tuple_of_2_elements(ylim, 'ylim') else: range_result = max_result - min_result ylim = (min_result - range_result * 0.2, max_result + range_result * 0.2) ax.set_ylim(ylim) if title is not None: ax.set_title(title) if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ylabel = ylabel.replace('@metric@', metric) ax.set_ylabel(ylabel) ax.grid(grid) return ax def _to_graphviz( tree_info: Dict[str, Any], show_info: List[str], feature_names: Union[List[str], None], precision: Optional[int] = 3, orientation: str = 'horizontal', constraints: Optional[List[int]] = None, **kwargs: Any ) -> Any: """Convert specified tree to graphviz instance. See: - https://graphviz.readthedocs.io/en/stable/api.html#digraph """ if GRAPHVIZ_INSTALLED: from graphviz import Digraph else: raise ImportError('You must install graphviz and restart your session to plot tree.') def add(root, total_count, parent=None, decision=None): """Recursively add node or edge.""" if 'split_index' in root: # non-leaf l_dec = 'yes' r_dec = 'no' if root['decision_type'] == '<=': lte_symbol = "&#8804;" operator = lte_symbol elif root['decision_type'] == '==': operator = "=" else: raise ValueError('Invalid decision type in tree model.') name = f"split{root['split_index']}" if feature_names is not None: label = f"<B>{feature_names[root['split_feature']]}</B> {operator}" else: label = f"feature <B>{root['split_feature']}</B> {operator} " label += f"<B>{_float2str(root['threshold'], precision)}</B>" for info in ['split_gain', 'internal_value', 'internal_weight', "internal_count", "data_percentage"]: if info in show_info: output = info.split('_')[-1] if info in {'split_gain', 'internal_value', 'internal_weight'}: label += f"<br/>{_float2str(root[info], precision)} {output}" elif info == 'internal_count': label += f"<br/>{output}: {root[info]}" elif info == "data_percentage": label += f"<br/>{_float2str(root['internal_count'] / total_count * 100, 2)}% of data" 
fillcolor = "white" style = "" if constraints: if constraints[root['split_feature']] == 1: fillcolor = "#ddffdd" # light green if constraints[root['split_feature']] == -1: fillcolor = "#ffdddd" # light red style = "filled" label = f"<{label}>" graph.node(name, label=label, shape="rectangle", style=style, fillcolor=fillcolor) add(root['left_child'], total_count, name, l_dec) add(root['right_child'], total_count, name, r_dec) else: # leaf name = f"leaf{root['leaf_index']}" label = f"leaf {root['leaf_index']}: " label += f"<B>{_float2str(root['leaf_value'], precision)}</B>" if 'leaf_weight' in show_info: label += f"<br/>{_float2str(root['leaf_weight'], precision)} weight" if 'leaf_count' in show_info: label += f"<br/>count: {root['leaf_count']}" if "data_percentage" in show_info: label += f"<br/>{_float2str(root['leaf_count'] / total_count * 100, 2)}% of data" label = f"<{label}>" graph.node(name, label=label) if parent is not None: graph.edge(parent, name, decision) graph = Digraph(**kwargs) rankdir = "LR" if orientation == "horizontal" else "TB" graph.attr("graph", nodesep="0.05", ranksep="0.3", rankdir=rankdir) if "internal_count" in tree_info['tree_structure']: add(tree_info['tree_structure'], tree_info['tree_structure']["internal_count"]) else: raise Exception("Cannot plot trees with no split") if constraints: # "#ddffdd" is light green, "#ffdddd" is light red legend = """< <TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" CELLPADDING="4"> <TR> <TD COLSPAN="2"><B>Monotone constraints</B></TD> </TR> <TR> <TD>Increasing</TD> <TD BGCOLOR="#ddffdd"></TD> </TR> <TR> <TD>Decreasing</TD> <TD BGCOLOR="#ffdddd"></TD> </TR> </TABLE> >""" graph.node("legend", label=legend, shape="rectangle", color="white") return graph def create_tree_digraph( booster: Union[Booster, LGBMModel], tree_index: int = 0, show_info: Optional[List[str]] = None, precision: Optional[int] = 3, orientation: str = 'horizontal', **kwargs: Any ) -> Any: """Create a digraph representation of specified 
tree. Each node in the graph represents a node in the tree. Non-leaf nodes have labels like ``Column_10 <= 875.9``, which means "this node splits on the feature named "Column_10", with threshold 875.9". Leaf nodes have labels like ``leaf 2: 0.422``, which means "this node is a leaf node, and the predicted value for records that fall into this node is 0.422". The number (``2``) is an internal unique identifier and doesn't have any special meaning. .. note:: For more information please visit https://graphviz.readthedocs.io/en/stable/api.html#digraph. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance to be converted. tree_index : int, optional (default=0) The index of a target tree to convert. show_info : list of str, or None, optional (default=None) What information should be shown in nodes. - ``'split_gain'`` : gain from adding this split to the model - ``'internal_value'`` : raw predicted value that would be produced by this node if it was a leaf node - ``'internal_count'`` : number of records from the training data that fall into this non-leaf node - ``'internal_weight'`` : total weight of all nodes that fall into this non-leaf node - ``'leaf_count'`` : number of records from the training data that fall into this leaf node - ``'leaf_weight'`` : total weight (sum of Hessian) of all observations that fall into this leaf node - ``'data_percentage'`` : percentage of training data that fall into this node precision : int or None, optional (default=3) Used to restrict the display of floating point values to a certain precision. orientation : str, optional (default='horizontal') Orientation of the tree. Can be 'horizontal' or 'vertical'. **kwargs Other parameters passed to ``Digraph`` constructor. Check https://graphviz.readthedocs.io/en/stable/api.html#digraph for the full list of supported parameters. Returns ------- graph : graphviz.Digraph The digraph representation of specified tree. 
""" if isinstance(booster, LGBMModel): booster = booster.booster_ elif not isinstance(booster, Booster): raise TypeError('booster must be Booster or LGBMModel.') model = booster.dump_model() tree_infos = model['tree_info'] if 'feature_names' in model: feature_names = model['feature_names'] else: feature_names = None monotone_constraints = model.get('monotone_constraints', None) if tree_index < len(tree_infos): tree_info = tree_infos[tree_index] else: raise IndexError('tree_index is out of range.') if show_info is None: show_info = [] graph = _to_graphviz(tree_info, show_info, feature_names, precision, orientation, monotone_constraints, **kwargs) return graph def plot_tree( booster: Union[Booster, LGBMModel], ax=None, tree_index: int = 0, figsize: Optional[Tuple[float, float]] = None, dpi: Optional[int] = None, show_info: Optional[List[str]] = None, precision: Optional[int] = 3, orientation: str = 'horizontal', **kwargs: Any ) -> Any: """Plot specified tree. Each node in the graph represents a node in the tree. Non-leaf nodes have labels like ``Column_10 <= 875.9``, which means "this node splits on the feature named "Column_10", with threshold 875.9". Leaf nodes have labels like ``leaf 2: 0.422``, which means "this node is a leaf node, and the predicted value for records that fall into this node is 0.422". The number (``2``) is an internal unique identifier and doesn't have any special meaning. .. note:: It is preferable to use ``create_tree_digraph()`` because of its lossless quality and returned objects can be also rendered and displayed directly inside a Jupyter notebook. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance to be plotted. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. tree_index : int, optional (default=0) The index of a target tree to plot. figsize : tuple of 2 elements or None, optional (default=None) Figure size. 
dpi : int or None, optional (default=None) Resolution of the figure. show_info : list of str, or None, optional (default=None) What information should be shown in nodes. - ``'split_gain'`` : gain from adding this split to the model - ``'internal_value'`` : raw predicted value that would be produced by this node if it was a leaf node - ``'internal_count'`` : number of records from the training data that fall into this non-leaf node - ``'internal_weight'`` : total weight of all nodes that fall into this non-leaf node - ``'leaf_count'`` : number of records from the training data that fall into this leaf node - ``'leaf_weight'`` : total weight (sum of Hessian) of all observations that fall into this leaf node - ``'data_percentage'`` : percentage of training data that fall into this node precision : int or None, optional (default=3) Used to restrict the display of floating point values to a certain precision. orientation : str, optional (default='horizontal') Orientation of the tree. Can be 'horizontal' or 'vertical'. **kwargs Other parameters passed to ``Digraph`` constructor. Check https://graphviz.readthedocs.io/en/stable/api.html#digraph for the full list of supported parameters. Returns ------- ax : matplotlib.axes.Axes The plot with single tree. """ if MATPLOTLIB_INSTALLED: import matplotlib.image as image import matplotlib.pyplot as plt else: raise ImportError('You must install matplotlib and restart your session to plot tree.') if ax is None: if figsize is not None: _check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) graph = create_tree_digraph(booster=booster, tree_index=tree_index, show_info=show_info, precision=precision, orientation=orientation, **kwargs) s = BytesIO() s.write(graph.pipe(format='png')) s.seek(0) img = image.imread(s) ax.imshow(img) ax.axis('off') return ax
#!/usr/bin/env python3
# @Author   : Yang Liu
# @FileName : meth_stats_tool.py
# @Software : NANOME project
# @Organization : JAX Li Lab
# @Website  : https://github.com/TheJacksonLaboratory/nanome
"""
Tool for pre-processing results.

NOTE(review): functions below rely on module-level globals defined elsewhere
in this file (argparse result ``args``, reference FASTA handle ``ref_fasta``)
and on star-imported names from ``nanocompare.global_config`` (presumably
``os``, ``logger``, ``pic_base_dir``, ``subset_of_list``) — confirm before
reusing any function in isolation.
"""

import argparse
import glob
import gzip
import sys
from collections import defaultdict
from multiprocessing import Pool

import h5py
import numpy as np
import pandas as pd
from Bio import SeqIO
from ont_fast5_api.fast5_interface import get_fast5_file
from tqdm import tqdm

from nanocompare.eval_common import load_tombo_df, load_deepmod_df, get_dna_base_from_reference, \
    load_sam_as_strand_info_df, load_nanopolish_df
from nanocompare.global_config import *
from nanocompare.global_settings import humanChrSet


def add_strand_info_for_nanopolish(
        nanopolish_fn='/projects/li-lab/yang/results/12-09/K562.nanopolish/K562.methylation_calls.tsv',
        sam_fn='/projects/li-lab/yang/results/12-09/K562.nanopolish/K562.sam'):
    """
    No need for new nanopolish output.

    Combine the nanopolish output tsv results with strand-info from SAM files.
    This will add the last column as strand-info. Original nanopolish output
    contains no strand-info; merging on read name solves that.

    Return results columns are:
    [(0, 'chromosome'), (1, 'start'), (2, 'end'), (3, 'read_name'),
     (4, 'log_lik_ratio'), (5, 'log_lik_methylated'), (6, 'log_lik_unmethylated'),
     (7, 'num_calling_strands'), (8, 'num_cpgs'), (9, 'sequence'), (10, 'strand-info')]

    :param nanopolish_fn: nanopolish per-read methylation-call TSV file name
    :param sam_fn: SAM file name providing strand-info per read
    :return: merged DataFrame (also written as TSV next to ``pic_base_dir``)
    """
    # CLI arguments (global ``args``) override the hard-coded default paths.
    if args.i is not None:
        nanopolish_fn = args.i
    if args.ibam is not None:
        sam_fn = args.ibam

    df2 = load_sam_as_strand_info_df(infn=sam_fn)
    df1 = load_nanopolish_df(infn=nanopolish_fn)

    # Left-merge keeps every nanopolish call; unmatched reads get NaN strand.
    df = df1.merge(df2, left_on='read_name', right_on='read-name', how='left')
    df = df.drop('read-name', axis=1)
    logger.info(df)
    logger.info(list(enumerate(df.columns)))

    # A left merge can only grow if SAM read names are duplicated; any size
    # change means reads were not 1:1 mapped between the two inputs.
    if len(df1) != len(df):
        raise Exception(
            "We found the read-name of Nanopolish results is not mapped all to SAM/BAM file, please check if the BAM file is used for Nanopolish")

    # df = df.iloc[:, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]

    outfn = os.path.join(pic_base_dir,
                         f'{os.path.splitext(os.path.basename(nanopolish_fn))[0]}-nanopolish-strand-info.tsv')
    df.to_csv(outfn, sep='\t', index=False)
    logger.info(f'save to {outfn}')
    return df


def sanity_check_get_dna_seq(chrstr):
    """
    Log the reference sequence around a position for manual inspection.

    Check 0-based start, input as 'chr1:123'.

    :param chrstr: "<chrom>:<0-based start>" string, e.g. ``'chr1:123'``
    :return: None (result is only logged)
    """
    # NOTE(review): ``chr`` shadows the builtin within this function.
    chr, start = chrstr.strip().split(':')
    start = int(start)

    # Arrow marks the queried base in the logged 11-character window.
    show_arrow = ''.join(['~'] * 5 + ['↑'] + ['~'] * 5)

    ret = get_dna_base_from_reference(chr, start, ref_fasta=ref_fasta)
    logger.info(f'chr={chr}, start={start}\nSEQ={ret}\nPOS={show_arrow}')


def filter_noncg_sites_ref_seq(df, tagname, ntask=1, ttask=1, num_seq=5, chr_col=0, start_col=1, strand_col=5,
                               toolname='tombo'):
    """
    Filter out rows that are non-CG patterns in Tombo results; reference
    sequence is based on BAM files.

    from SAM to BAM (with index) script is as follows:

    samtools view -S -b K562.sam > K562.bam
    samtools sort -o K562.sorted.bam K562.bam
    samtools index K562.sorted.bam

    :param df: per-read stats DataFrame (positional columns, see *_col params)
    :param tagname: tag used to build the output file name
    :param ntask: total number of parallel tasks the rows are split into
    :param ttask: 1-based index of this task's subset
    :param num_seq: number of flanking bases fetched on each side of the site
    :param chr_col: column index of the chromosome
    :param start_col: column index of the 0-based start
    :param strand_col: column index of the strand
    :param toolname: 'tombo' or 'deepmod' — selects the CG-window test
    :return: None (filtered subset is written to ``args.o``)
    """
    chrs = df.iloc[:, chr_col].unique()
    chrs = np.sort(chrs)
    logger.info(chrs)
    logger.info(len(chrs))

    # Partition row indices across ntask workers; this task takes slice ttask.
    all_list = list(range(len(df)))
    cpg_pattern_index = subset_of_list(all_list, ntask, ttask)

    # sel_chrs = subset_of_list(chrs, ntask, ttask)
    # logger.info(sel_chrs)
    # df = df[df[0].isin(sel_chrs)]
    df = df.iloc[cpg_pattern_index, :]
    logger.info(df)

    rep_chr = df.iloc[0, chr_col]

    seq_col = []
    # Reused name: from here on it collects indices of rows that pass the CG test.
    cpg_pattern_index = []

    print_first = True
    for index, row in tqdm(df.iterrows()):
        if print_first:
            logger.info(f"index={index}, row={row}")
            print_first = False
        chr = row[chr_col]  # NOTE(review): shadows builtin ``chr``
        start = int(row[start_col])
        strand_info = row[strand_col]

        # ret = get_dna_sequence_from_samfile(chr, start, start + num_seq, samfile)  # may return None, if no sequence at all reads
        ret = get_dna_base_from_reference(chr, start, num_seq=num_seq, ref_fasta=ref_fasta)
        seq_col.append(ret)

        # With num_seq=5 the queried base sits at ret[5]; ret[5:7] == 'CG'
        # means the site is the C of a CpG on the forward strand, ret[4:6]
        # the C of a CpG on the reverse strand.
        if toolname == 'tombo':
            if ret[5:7] == 'CG':
                cpg_pattern_index.append(index)
        elif toolname == 'deepmod':
            if strand_info == '+':
                if ret[5:7] == 'CG':
                    cpg_pattern_index.append(index)
            elif strand_info == '-':
                if ret[4:6] == 'CG':
                    cpg_pattern_index.append(index)

        # TODO: using ret if it is CG pattern, or will remove later

        # logger.info(f'chr={chr}, start={start}, strand={strand_info}, ret={ret}')
        # if index > 10000:
        #     break
    df['sequence'] = seq_col

    logger.debug(f'before filter:{len(df)}, after non-CG filter:{len(cpg_pattern_index)}')
    df = df.loc[cpg_pattern_index, :]

    # tagname is like 'K562.tombo.perReadsStats.combine'
    # then outfn is like 'K562.tombo.perReadsStats.combine-with-seq-info-n300-t001-chr1.tsv'
    outfn = os.path.join(args.o, f'{tagname}-with-seq-info-n{ntask}-t{ttask:03d}-{rep_chr}.tsv')
    df.to_csv(outfn, sep='\t', header=False, index=False)
    logger.info(f"save to {outfn}")


def filter_noncg_sites_ref_seq_mpi(df, tagname, ntask=1, ttask=1, num_dna_seq=5, chr_col=0, start_col=1, strand_col=5,
                                   toolname='tombo', print_first=False):
    """
    MPI version.
    invoke like: res = p.apply_async(testFunc, args=(2, 4), kwds={'calcY': False})
    or pool.apply_async(test, (t,), dict(arg2=5))

    Filter out rows that are non-CG patterns in Tombo results; reference
    sequence is based on BAM files. Unlike the non-MPI variant, this one
    RETURNS the filtered DataFrame instead of writing a TSV.

    :param df: chunk of the per-read stats DataFrame handled by this subprocess
    :param tagname: tag name (kept for interface parity; unused here)
    :param ntask: total number of subprocesses (for logging)
    :param ttask: this subprocess's 1-based index (for logging)
    :param num_dna_seq: number of flanking bases on each side of the site
    :param toolname: 'tombo', 'deepmod' or 'deepmod-read-level'
    :param print_first: log the first (index, row) for debugging
    :return: filtered DataFrame with an added 'sequence' column
    """
    rep_chr = df.iloc[0, chr_col]

    seq_col = []
    only_cpg_pattern_index = []

    for index, row in df.iterrows():
        if print_first:
            logger.info(f"index={index}, row={row}")
            print_first = False
        chr = row[chr_col]  # NOTE(review): shadows builtin ``chr``
        start = int(row[start_col])
        strand_info = row[strand_col]

        ret = get_dna_base_from_reference(chr, start, num_seq=num_dna_seq, ref_fasta=ref_fasta)
        seq_col.append(ret)

        # Same CG-window test as filter_noncg_sites_ref_seq (site at ret[5]).
        if toolname == 'tombo':
            if ret[5:7] == 'CG':
                only_cpg_pattern_index.append(index)
        elif toolname in ['deepmod', 'deepmod-read-level']:
            if strand_info == '+':
                if ret[5:7] == 'CG':
                    only_cpg_pattern_index.append(index)
            elif strand_info == '-':
                if ret[4:6] == 'CG':
                    only_cpg_pattern_index.append(index)

    df['sequence'] = seq_col

    # logger.debug(f'Subprocess [{ttask}:{ntask}] finished, before filter:{len(df)}, after non-CG filter:{len(only_cpg_pattern_index)}')
    df = df.loc[only_cpg_pattern_index, :]

    # tagname is like 'K562.tombo.perReadsStats.combine'
    # then outfn is like 'K562.tombo.perReadsStats.combine-with-seq-info-n300-t001-chr1.tsv'
    # outfn = os.path.join(args.o, f'{tagname}-with-seq-info-n{ntask}-t{ttask:03d}-{rep_chr}.tsv')
    # df.to_csv(outfn, sep='\t', header=False, index=False)
    # logger.info(f"save to {outfn}")
    logger.info(f"Finished of subprocess {ttask}:{ntask}")
    return df


def filter_noncg_sites_for_tombo(
        tombo_fn='/projects/li-lab/yang/workspace/nano-compare/data/tools-call-data/K562/K562.tombo_perReadsStats.bed',
        sam_fn='/projects/li-lab/yang/results/12-09/K562.nanopolish/K562.sorted.bam',
        ntask=1, ttask=1, num_seq=5):
    # Driver: load Tombo per-read stats and delegate to the generic CG filter.
    # ``sam_fn`` is accepted for interface parity but not used here.
    if args.i is not None:
        tombo_fn = args.i

    df = load_tombo_df(infn=tombo_fn)
    basefn = os.path.basename(tombo_fn)
    basename = os.path.splitext(basefn)[0]
    filter_noncg_sites_ref_seq(df=df, tagname=basename, ntask=ntask, ttask=ttask, num_seq=num_seq)


def convert_bismark_add_strand_and_seq(indf, outfn, report_num=None):
    """
    Check start pointer, if point to CG's C, it
is positive strand, or else, it is reverse strand Note: input file is 1-based start, we also output to a 1-based format that is compatable to our Bismark import functions. :param indf: :param outf: :param report_num: :return: """ logger.debug(f'Start add strand and seq to bismark cov file, total len={len(indf)}') outf = gzip.open(outfn, 'wt') for index, row in tqdm(indf.iterrows()): # if report_num and index % report_num == 0: # logger.debug(f'processed index={index}') chr = row['chr'] start = int(row['start']) # Keep raw 1-based format of bismark results ret = get_dna_base_from_reference(chr, start - 1, ref_fasta=ref_fasta) if ret[5] == 'C': # strand is + strand = '+' elif ret[5] == 'G': strand = '-' else: raise Exception(f'We can not identify this bg-truth file with non-CG results, such as row={row}') outstr = '\t'.join([chr, str(start), strand, str(row['mcount']), str(row['ccount']), ret[4:7]]) outf.write(f'{outstr}\n') outf.close() logger.info(f'save to {outfn}') logger.debug(f'Finish add strand info task') def convert_bismark_cov_to_gw_format(df): """ Save adding strand info and dna seq format, which is in same format of Bismark Genome-wide output files :param df: :return: """ basefn = os.path.basename(args.i) basename = os.path.splitext(basefn)[0] outfn = os.path.join(args.o, f'{basename}.convert.add.strand.tsv.gz') convert_bismark_add_strand_and_seq(df, outfn) def filter_noncg_sites_mpi(df, ntask=300, toolname='tombo'): """ MPI version of filter out non-CG patterns :return: """ basefn = os.path.basename(args.i) basename = os.path.splitext(basefn)[0] all_list = list(range(len(df))) # Store each sub-process return results df_list = [] with Pool(processes=args.processors) as pool: for epoch in range(ntask): cpg_pattern_index = subset_of_list(all_list, ntask, epoch + 1) seldf = df.iloc[cpg_pattern_index, :] if toolname == 'tombo': df_list.append(pool.apply_async(filter_noncg_sites_ref_seq_mpi, (seldf, basename, ntask, epoch + 1))) elif toolname == 'deepmod': 
df_list.append(pool.apply_async(filter_noncg_sites_ref_seq_mpi, (seldf, basename, ntask, epoch + 1), dict(chr_col=0, start_col=1, strand_col=5, toolname='deepmod'))) elif toolname == 'deepmod-read-level': df_list.append(pool.apply_async(filter_noncg_sites_ref_seq_mpi, (seldf, basename, ntask, epoch + 1), dict(chr_col=0, start_col=1, strand_col=5, toolname='deepmod-read-level'))) else: raise Exception(f"{toolname} is no valid.") pool.close() pool.join() # Combine df logger.debug("Start to combine all results") df_list = [df1.get() for df1 in df_list] retdf = pd.concat(df_list) logger.debug(retdf) ## Note: original input=K562.tombo.perReadsStats.combine.tsv ## output=K562.tombo.perReadsStatsOnlyCpG.combine.tsv if toolname == 'tombo': basefn = basefn.replace("perReadsStats", "perReadsStatsOnlyCG").replace("combined", "combine") elif toolname == 'deepmod': ## Example: HL60.deepmod.C.combined.tsv basefn = basefn.replace(".C.", ".C_OnlyCG.").replace("combined", "combine") else: raise Exception(f"{toolname} is no valid.") outfn = os.path.join(args.o, f'{basefn}') retdf.to_csv(outfn, sep='\t', index=False, header=False) logger.debug(f"Save to {outfn}") def filter_noncg_sites_for_deepmod( deepmod_fn='/projects/li-lab/yang/workspace/nano-compare/data/tools-call-data/K562/K562.deepmod_combined.bed', sam_fn='/projects/li-lab/yang/results/12-09/K562.nanopolish/K562.sorted.bam', ntask=1, ttask=1, num_seq=5): if args.i is not None: deepmod_fn = args.i df = load_deepmod_df(infn=deepmod_fn) basefn = os.path.basename(deepmod_fn) basename = os.path.splitext(basefn)[0] filter_noncg_sites_ref_seq(df=df, tagname=basename, ntask=ntask, ttask=ttask, num_seq=num_seq, chr_col=0, start_col=1, strand_col=5, toolname='deepmod') def subset_of_list(alist, n, t): """ Subset of a list for multi-processing n=1 to 100 t=1 to N return subset list of alist :param alist: :param n: :param t: :return: """ if t < 1 or t > n: raise Exception(f't={t} is not accept, must be 1-N (include)') if n > len(alist): 
# if n is bigger than all list, return only 1 for t<=len if t <= len(alist): return [alist[t - 1]] else: return None m = int(len(alist) / n) # each task of a section of list start_index = int((t - 1) * m) if t == n: sublist = alist[start_index:] else: sublist = alist[start_index:start_index + m] # logger.debug(f'n={n}, t={t}, section={m}, index={start_index}:{start_index + m}') return sublist def get_f5_readid_map(flist): f5_readid_map = defaultdict(str) for fn in flist: basename = os.path.basename(fn) with get_fast5_file(fn, mode="r") as f5: # if len(f5.get_reads()) >= 2: # raise Exception(f'We can only deal with one read in fast5, but fn={fn}, contains {len(f5.get_reads())} multiple reads') for read in f5.get_reads(): f5_readid_map[basename] = str(read.read_id) return f5_readid_map def build_map_fast5_to_readid_mp( basedir='/fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-basecall', ntask=300): patfn = os.path.join(basedir, '**', '*.fast5') fast5_flist = glob.glob(patfn, recursive=True) logger.info(f'Total fast5 files: {len(fast5_flist)}') ret_list = [] with Pool(processes=args.processors) as pool: for epoch in range(ntask): subflist = subset_of_list(fast5_flist, ntask, epoch + 1) ret_list.append(pool.apply_async(get_f5_readid_map, (subflist,))) pool.close() pool.join() logger.debug('Finish fast5 to read-id mapping') f5_readid_map = defaultdict(str) for ret in ret_list: f5_readid_map.update(ret.get()) # for fn in fast5_flist[:]: # # logger.debug(fn) # basename = os.path.basename(fn) # # with get_fast5_file(fn, mode="r") as f5: # for read in f5.get_reads(): # # logger.debug(read.read_id) # f5_readid_map[basename] = str(read.read_id) return f5_readid_map def process_pred_detail_f5file(fn, f5_readid_map): """ For each deepmod prediction results file, we analyze a df result of read-level results :param fn: :param f5_readid_map: :return: """ f5_pred_key = '/pred/pred_0/predetail' dflist = [] with h5py.File(fn, 'r') as mr: # m_pred = 
mr[f5_pred_key].value # logger.debug(m_pred) for name in mr['/pred']: # logger.debug(name) pred_num_key = f'/pred/{name}' f5file = os.path.basename(mr[pred_num_key].attrs['f5file']) mapped_chr = mr[pred_num_key].attrs['mapped_chr'] mapped_strand = mr[pred_num_key].attrs['mapped_strand'] # logger.debug(f'{pred_num_key}: chr={mapped_chr}, strand={mapped_strand}, f5file={f5file}') pred_detail_key = f'{pred_num_key}/predetail' # m_pred = mr[pred_detail_key].value m_pred = mr[pred_detail_key][()] m_pred = np.array(m_pred, dtype=[('refbase', 'U1'), ('readbase', 'U1'), ('refbasei', np.uint64), ('readbasei', np.uint64), ('mod_pred', np.int)]) dataset = [] for mi in range(len(m_pred)): if m_pred['refbase'][mi] not in ['C']: continue if m_pred['refbase'][mi] in ['-', 'N', 'n']: continue # if m_pred['readbase'][mi] == '-': # continue # Filter non-CG patterns results ret = get_dna_base_from_reference(mapped_chr, int(m_pred['refbasei'][mi]), ref_fasta=ref_fasta) if mapped_strand == '+': if ret[5:7] != 'CG': continue elif mapped_strand == '-': if ret[4:6] != 'CG': continue if -0.1 < m_pred['mod_pred'][mi] - 1 < 0.1: meth_indicator = 1 else: meth_indicator = 0 # sp_options['4NA'][m_pred['refbase'][mi]][(cur_chr, cur_strand, int(m_pred['refbasei'][mi]) )][0] += 1 ret = {'start': int(m_pred['refbasei'][mi]), 'pred': meth_indicator, 'base': m_pred['refbase'][mi], 'sequence': ret} dataset.append(ret) df = pd.DataFrame(dataset) if len(df) < 1: continue df['chr'] = str(mapped_chr) df['end'] = df['start'] + 1 df['strand'] = str(mapped_strand) df['read-id'] = f5_readid_map[f5file] df = df[['chr', 'start', 'end', 'read-id', 'base', 'strand', 'sequence', 'pred']] # logger.info(df) dflist.append(df) sumdf = pd.concat(dflist) # logger.debug(f'Process pred detail file {fn} finished, total reads={len(sumdf)}.') return sumdf def extract_deepmod_read_level_results_mp( basecallDir='/fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-basecall', 
methcallDir='/fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-methcall', ntask=50): f5_readid_map = build_map_fast5_to_readid_mp(basedir=basecallDir, ntask=ntask) # logger.debug(f5_readid_map) # dirname = '/fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-methcall/**/rnn.pred.detail.fast5.*' dirname = os.path.join(methcallDir, '**', 'rnn.pred.detail.fast5.*') fast5_flist = glob.glob(dirname, recursive=True) logger.info(f'Total deepmod fast5 files:{len(fast5_flist)}') dflist = [] with Pool(processes=args.processors) as pool: for fn in fast5_flist[:]: # df = process_pred_detail_f5file(fn, f5_readid_map) dflist.append(pool.apply_async(process_pred_detail_f5file, (fn, f5_readid_map,))) # logger.debug(df) # logger.debug(df.iloc[1, :]) # logger.debug(fn) # pass pool.close() pool.join() dflist = [df.get() for df in dflist] sumdf = pd.concat(dflist) logger.debug('Finish get df from deepmod fast5 predetail files') cpgDict = defaultdict(lambda: [0, 0]) # 0:cov, 1:meth-cov for index, row in sumdf.iterrows(): chr = row['chr'] start = row['start'] strand = row['strand'] basekey = (chr, start, strand) cpgDict[basekey][0] += 1 if row['pred'] == 1: cpgDict[basekey][1] += 1 logger.debug(f'CpG sites={len(cpgDict)}') dataset = [] for site in cpgDict: ret = {'chr': site[0], 'start': site[1], 'end': site[1] + 1, 'base': 'C', 'cap-cov': cpgDict[site][0], 'strand': site[2], 'no-use1': '', 'start1': site[1], 'end1': site[1] + 1, 'no-use2': '0,0,0', 'cov': cpgDict[site][0], 'meth-freq': int(100 * cpgDict[site][1] / cpgDict[site][0]), 'meth-cov': cpgDict[site][1]} dataset.append(ret) beddf = pd.DataFrame(dataset) beddf = beddf[ ['chr', 'start', 'end', 'base', 'cap-cov', 'strand', 'no-use1', 'start1', 'end1', 'no-use2', 'cov', 'meth-freq', 'meth-cov']] logger.debug('Finish bed df, extract all DONE.') return sumdf, beddf def parse_arguments(): """ :return: """ parser = argparse.ArgumentParser(description='Multi-task') 
parser.add_argument("cmd", help="name of command: compute, combine, or gen-pixel-info") parser.add_argument('-n', type=int, help="the total number of tasks (1-27)", default=1) parser.add_argument('-t', type=int, help="the current task id (1-N)", default=1) parser.add_argument('-i', type=str, help="input file", default=None) parser.add_argument('-o', type=str, help="output dir or file", default=pic_base_dir) parser.add_argument('--o2', type=str, help="second output dir or file", default=None) parser.add_argument('--ibam', type=str, help="input bam/sam file", default=None) parser.add_argument('--basecallDir', type=str, help="basecallDir dir name", default=None) parser.add_argument('--methcallDir', type=str, help="methcallDir dir name", default=None) parser.add_argument('--processors', type=int, help="Number of processors", default=8) parser.add_argument('--mpi', action='store_true') parser.add_argument('--chrs', nargs='+', help='all chrs need to check', default=[]) return parser.parse_args() def output_bed_by_bin(bin_id): num_bins = 5 density_col = 4 output_cols = [0, 1, 2] bin_value = int(bin_id / num_bins * 100 + 1e-5) logger.info(f"start with bin_id={bin_id}, bin_value={bin_value}") ndf = df[df[density_col] == bin_value] ndf = ndf.iloc[:, output_cols] logger.info(f"start to save, df={len(df):,}, ndf={len(ndf):,}, for bin_value={bin_value}") outfn = os.path.join(args.o, f"hg38.gc5Base.bin{bin_value}.bed.gz") ndf.to_csv(outfn, sep='\t', header=False, index=False) logger.info(f"save to {outfn}") def output_bed_by_bin2(infn, num_bins): inf = gzip.open(infn, 'rt') outf_list = [] for bin_id in range(0, num_bins + 1): bin_value = int(bin_id / num_bins * 100 + 1e-5) outf_list.append(gzip.open(os.path.join(args.o, f"hg38.gc5Base.bin{bin_value}.bed.gz"), 'wt')) for row in tqdm(inf): tmp = row.strip().split("\t") density_col = 4 bin_value = int(float(tmp[density_col]) + 1e-5) bin_id = bin_value // 20 if bin_id not in range(0, num_bins + 1): logger.error(f"Error found: 
bin_value={bin_value}, bin_id={bin_id}, for row={row}") raise Exception(f"Error found: bin_value={bin_value}, bin_id={bin_id}, for row={row}") outf_list[bin_id].write(f"{tmp[0]}\t{tmp[1]}\t{tmp[2]}\n") [outf.close for outf in outf_list] logger.info("Finished bin bed for gc density") def save_tss_bed_for_5hmc(infn, outfn): logger.info(f"open infn={infn}") df = pd.read_csv(infn, sep='\t', header=None) logger.debug(df) df = df.iloc[:, [0, 1, 2, 4, 7]] df.columns = ['chr', 'start', 'end', '5hmc_level', 'strand'] df['n1'] = '.' df['start'] = df['start'].astype(int) - 1 df['end'] = df['end'].astype(int) - 1 df['5hmc_level'] = df['5hmc_level'].astype(float) df = df[['chr', 'start', 'end', '5hmc_level', 'n1', 'strand']] logger.info(f"df['5hmc_level'] = {df["5hmc_level"].describe()}") logger.info(f"len(df['5hmc_level'] >= 1.0) = {(df.loc[:, "5hmc_level"] >= 1.0 - 1e-3).sum()}") df.to_csv(outfn, sep='\t', header=False, index=False) logger.info(f"save to {outfn}") pass if __name__ == '__main__': set_log_debug_level() args = parse_arguments() logger.debug(args) ref_fasta = None if args.cmd in ['tombo-add-seq', 'deepmod-add-seq', 'deepmod-read-level', 'sanity-check-seq', 'bismark-convert']: # These command will use reference genome ref_fn = '/projects/li-lab/Ziwei/Nanopore/data/reference/hg38.fa' ref_fasta = SeqIO.to_dict(SeqIO.parse(open(ref_fn), 'fasta')) if args.cmd == 'tombo-add-seq': if args.mpi: logger.debug('in mpi mode') import multiprocessing logger.debug( "There are %d CPUs on this machine by multiprocessing.cpu_count()" % multiprocessing.cpu_count()) df = load_tombo_df(infn=args.i) filter_noncg_sites_mpi(df) else: filter_noncg_sites_for_tombo(ntask=args.n, ttask=args.t) elif args.cmd == 'deepmod-add-seq': if args.mpi: logger.debug('in mpi mode') import multiprocessing logger.debug( "There are %d CPUs on this machine by multiprocessing.cpu_count()" % multiprocessing.cpu_count()) df = load_deepmod_df(infn=args.i) filter_noncg_sites_mpi(df, toolname='deepmod') else: 
filter_noncg_sites_for_deepmod(ntask=args.n, ttask=args.t) elif args.cmd == 'nanopolish-add-strand': add_strand_info_for_nanopolish() elif args.cmd == 'sanity-check-seq': ## bash meth_stats_tool.sh sanity-check-seq --chrs chr4:10164 chr4:10298 for chrstr in args.chrs: # logger.info(chrstr) sanity_check_get_dna_seq(chrstr) elif args.cmd == 'deepmod-read-level': ### Running bash: """ sbatch meth_stats_tool_mpi.sh deepmod-read-level --basecallDir /fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-basecall --methcallDir /fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-methcall -o /fastscratch/liuya/nanocompare/deepmod-read-level1.tsv --o2 /fastscratch/liuya/nanocompare/deepmod-read-level1-extract-output.bed """ sumdf, beddf = extract_deepmod_read_level_results_mp(basecallDir=args.basecallDir, methcallDir=args.methcallDir) logger.info(sumdf) logger.info(sumdf.iloc[1, :]) logger.info(sumdf['chr'].unique()) # outfn = os.path.join('/fastscratch/liuya/nanocompare/', 'deepmod-read-level.tsv') # Save read level results outfn = args.o sumdf.to_csv(outfn, sep='\t', index=False, header=False) logger.info(f'save to {outfn}') if args.o2: # Save CpG base level results bed file for cluster module use outfn = args.o2 beddf.to_csv(outfn, sep=' ', index=False, header=False) logger.info(f'save to {outfn}') elif args.cmd == 'bismark-convert': # Convert non-strand info bismark to strand ## bash meth_stats_tool.sh bismark-convert -i /pod/2/li-lab/Ziwei/Nanopore_methyl_compare/result/APL_BSseq/APL-bs_R1_val_1_bismark_bt2_pe.deduplicated.sorted.bed ## sbatch meth_stats_tool.sh bismark-convert -i /pod/2/li-lab/Ziwei/Nanopore_methyl_compare/result/APL_BSseq/APL-bs_R1_val_1_bismark_bt2_pe.deduplicated.sorted.bed df = pd.read_csv(args.i, sep='\t', header=None) if len(df.columns) != 6: raise Exception(f"Can no recognize input file format for infn={args.i}, df={df}") df.columns = ['chr', 'start', 'end', 'freq100', 'mcount', 'ccount'] 
logger.debug(df) convert_bismark_cov_to_gw_format(df) elif args.cmd == 'gc-density-bed': # sbatch meth_stats_tool.sh gc-density-bed infn = "/projects/li-lab/yang/workspace/nano-compare/data/genome-annotation/hg38.gc5Base.bed.gz" output_bed_by_bin2(infn, num_bins=5) if True: sys.exit(0) df = pd.read_csv(infn, sep='\t', header=None) df.iloc[:, 4] = df.iloc[:, 4].astype(int) logger.debug(df) bin_list = list(range(1, 6)) os.makedirs(args.o, exist_ok=True) with Pool(processes=args.processors) as pool: pool.map(output_bed_by_bin, bin_list) elif args.cmd == 'repetitive-bed': # sbatch meth_stats_tool.sh repetitive-bed # bash meth_stats_tool.sh repetitive-bed infn = "/projects/li-lab/yang/results/2021-07-01/hg38.repetitive.bed.gz" df = pd.read_csv(infn, sep='\t') df = df[df['genoName'].isin(humanChrSet)] df['n1'] = '.' df['n2'] = '.' logger.info(df) outfn = f"hg38.repetitive.rep_All.bed.gz" df[['genoName', 'genoStart', 'genoEnd', 'n1', 'n2', 'strand']].to_csv(os.path.join(args.o, outfn), sep='\t', header=False, index=False) region_dict = { "LINE": ["LINE"], "SINE": ["SINE"], "LTR": ["LTR"], "DNA": ["DNA"] } used_list = [] for key in region_dict: logger.info(f"seperate {key}") used_list += region_dict[key] ndf = df[df['repClass'].isin(region_dict[key])] ndf = ndf[['genoName', 'genoStart', 'genoEnd', 'n1', 'n2', 'strand']] # logger.info(ndf) outfn = f"hg38.repetitive.rep_{key}.bed.gz" ndf.to_csv(os.path.join(args.o, outfn), sep='\t', header=False, index=False) logger.info(f"len={len(ndf)}, save to {outfn}") ## Output others ndf = df[~df['repClass'].isin(used_list)] ndf = ndf[['genoName', 'genoStart', 'genoEnd', 'n1', 'n2', 'strand']] # logger.info(ndf) outfn = f"hg38.repetitive.rep_Others.bed.gz" ndf.to_csv(os.path.join(args.o, outfn), sep='\t', header=False, index=False) logger.info(f"len={len(ndf)}, save to {outfn}") elif args.cmd == 'apl-5hmc-bed': # Extract TSS format BED file for 5hmC # convert 1-based to 0-based results, output 5hmc level # bash meth_stats_tool.sh 
apl-5hmc-bed # file will be later converted into BW file infn = "/pod/2/li-lab/Nanopore_compare/data/APL_5hmC_BSseq/APL.cov5.mlml.addstrand.selected.bed.gz" outfn = os.path.join(args.o, "APL.5hmc.tss.cov5.bed.gz") save_tss_bed_for_5hmc(infn, outfn) infn = "/pod/2/li-lab/Nanopore_compare/data/APL_5hmC_BSseq/APL.mlml.addstrand.selected.bed.gz" outfn = os.path.join(args.o, "APL.5hmc.tss.cov1.bed.gz") save_tss_bed_for_5hmc(infn, outfn) pass elif args.cmd == 'merge-basecall-summary': ## sbatch meth_stats_tool.sh merge-basecall-summary -i /projects/li-lab/yang/results/2021-07-17/NA12878_basecall_logs_output baseDir = args.i flist = glob.glob(os.path.join(baseDir, '**', '*sequencing_summary.txt')) logger.info(flist) logger.info(len(flist)) dflist = [] for fn in flist: df = pd.read_csv(fn, sep='\t') dflist.append(df) dfall = pd.concat(dflist) outfn = os.path.join(args.o, 'NA12878-allChrs-basecall.sequencing_summary.txt') dfall.to_csv(outfn, sep='\t', index=False) logger.info(f"save to {outfn}") else: raise Exception(f"Not support command={args.cmd}") logger.info("meth_stats_tool DONE")
#!/usr/bin/env python3 # @Author : Yang Liu # @FileName : meth_stats_tool.py # @Software : NANOME project # @Organization : JAX Li Lab # @Website : https://github.com/TheJacksonLaboratory/nanome """ Tool for pre-processing results """ import argparse import glob import gzip import sys from collections import defaultdict from multiprocessing import Pool import h5py import numpy as np import pandas as pd from Bio import SeqIO from ont_fast5_api.fast5_interface import get_fast5_file from tqdm import tqdm from nanocompare.eval_common import load_tombo_df, load_deepmod_df, get_dna_base_from_reference, \ load_sam_as_strand_info_df, load_nanopolish_df from nanocompare.global_config import * from nanocompare.global_settings import humanChrSet def add_strand_info_for_nanopolish( nanopolish_fn='/projects/li-lab/yang/results/12-09/K562.nanopolish/K562.methylation_calls.tsv', sam_fn='/projects/li-lab/yang/results/12-09/K562.nanopolish/K562.sam'): """ No need for new nanopolish output Combine the nanopolish output tsv results with strand-info from SAM files. This will add last column as strand-info. This is due to original nanopolish output results contain no strand-info, we are going to solve this problem. 
Return results columns are: [(0, 'chromosome'), (1, 'start'), (2, 'end'), (3, 'read_name'), (4, 'log_lik_ratio'), (5, 'log_lik_methylated'), (6, 'log_lik_unmethylated'), (7, 'num_calling_strands'), (8, 'num_cpgs'), (9, 'sequence'), (10, 'strand-info')] :param nanopolish_fn: nanopolish file name :param sam_fn: SAM file name for strand-info :return: """ if args.i is not None: nanopolish_fn = args.i if args.ibam is not None: sam_fn = args.ibam df2 = load_sam_as_strand_info_df(infn=sam_fn) df1 = load_nanopolish_df(infn=nanopolish_fn) df = df1.merge(df2, left_on='read_name', right_on='read-name', how='left') df = df.drop('read-name', axis=1) logger.info(df) logger.info(list(enumerate(df.columns))) if len(df1) != len(df): raise Exception( "We found the read-name of Nanopolish results is not mapped all to SAM/BAM file, please check if the BAM file is used for Nanopolish") # df = df.iloc[:, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]] outfn = os.path.join(pic_base_dir, f'{os.path.splitext(os.path.basename(nanopolish_fn))[0]}-nanopolish-strand-info.tsv') df.to_csv(outfn, sep='\t', index=False) logger.info(f'save to {outfn}') return df def sanity_check_get_dna_seq(chrstr): """ Check 0-based start, input as 'chr1:123' :param chrstr: :return: """ chr, start = chrstr.strip().split(':') start = int(start) show_arrow = ''.join(['~'] * 5 + ['↑'] + ['~'] * 5) ret = get_dna_base_from_reference(chr, start, ref_fasta=ref_fasta) logger.info(f'chr={chr}, start={start}\nSEQ={ret}\nPOS={show_arrow}') def filter_noncg_sites_ref_seq(df, tagname, ntask=1, ttask=1, num_seq=5, chr_col=0, start_col=1, strand_col=5, toolname='tombo'): """ Filter out rows that are non-CG patterns in Tombo results, reference sequence is based on BAM files from SAM to BAM (with index) script is as follows: samtools view -S -b K562.sam > K562.bam samtools sort -o K562.sorted.bam K562.bam samtools index K562.sorted.bam :param tombo_fn: :param sam_fn: :return: """ chrs = df.iloc[:, chr_col].unique() chrs = np.sort(chrs) 
logger.info(chrs) logger.info(len(chrs)) all_list = list(range(len(df))) cpg_pattern_index = subset_of_list(all_list, ntask, ttask) # sel_chrs = subset_of_list(chrs, ntask, ttask) # logger.info(sel_chrs) # df = df[df[0].isin(sel_chrs)] df = df.iloc[cpg_pattern_index, :] logger.info(df) rep_chr = df.iloc[0, chr_col] seq_col = [] cpg_pattern_index = [] print_first = True for index, row in tqdm(df.iterrows()): if print_first: logger.info(f"index={index}, row={row}") print_first = False chr = row[chr_col] start = int(row[start_col]) strand_info = row[strand_col] # ret = get_dna_sequence_from_samfile(chr, start, start + num_seq, samfile) # may return None, if no sequence at all reads ret = get_dna_base_from_reference(chr, start, num_seq=num_seq, ref_fasta=ref_fasta) seq_col.append(ret) if toolname == 'tombo': if ret[5:7] == 'CG': cpg_pattern_index.append(index) elif toolname == 'deepmod': if strand_info == '+': if ret[5:7] == 'CG': cpg_pattern_index.append(index) elif strand_info == '-': if ret[4:6] == 'CG': cpg_pattern_index.append(index) # TODO: using ret if it is CG pattern, or will remove later # logger.info(f'chr={chr}, start={start}, strand={strand_info}, ret={ret}') # if index > 10000: # break df['sequence'] = seq_col logger.debug(f'before filter:{len(df)}, after non-CG filter:{len(cpg_pattern_index)}') df = df.loc[cpg_pattern_index, :] # tagname is like 'K562.tombo.perReadsStats.combine' # then outfn is like 'K562.tombo.perReadsStats.combine-with-seq-info-n300-t001-chr1.tsv' outfn = os.path.join(args.o, f'{tagname}-with-seq-info-n{ntask}-t{ttask:03d}-{rep_chr}.tsv') df.to_csv(outfn, sep='\t', header=False, index=False) logger.info(f"save to {outfn}") def filter_noncg_sites_ref_seq_mpi(df, tagname, ntask=1, ttask=1, num_dna_seq=5, chr_col=0, start_col=1, strand_col=5, toolname='tombo', print_first=False): """ MPI version invoke like: res = p.apply_async(testFunc, args=(2, 4), kwds={'calcY': False}) or pool.apply_async(test, (t,), dict(arg2=5)) Filter out rows 
that are non-CG patterns in Tombo results, reference sequence is based on BAM files :param tombo_fn: :param sam_fn: :return: """ rep_chr = df.iloc[0, chr_col] seq_col = [] only_cpg_pattern_index = [] for index, row in df.iterrows(): if print_first: logger.info(f"index={index}, row={row}") print_first = False chr = row[chr_col] start = int(row[start_col]) strand_info = row[strand_col] ret = get_dna_base_from_reference(chr, start, num_seq=num_dna_seq, ref_fasta=ref_fasta) seq_col.append(ret) if toolname == 'tombo': if ret[5:7] == 'CG': only_cpg_pattern_index.append(index) elif toolname in ['deepmod', 'deepmod-read-level']: if strand_info == '+': if ret[5:7] == 'CG': only_cpg_pattern_index.append(index) elif strand_info == '-': if ret[4:6] == 'CG': only_cpg_pattern_index.append(index) df['sequence'] = seq_col # logger.debug(f'Subprocess [{ttask}:{ntask}] finished, before filter:{len(df)}, after non-CG filter:{len(only_cpg_pattern_index)}') df = df.loc[only_cpg_pattern_index, :] # tagname is like 'K562.tombo.perReadsStats.combine' # then outfn is like 'K562.tombo.perReadsStats.combine-with-seq-info-n300-t001-chr1.tsv' # outfn = os.path.join(args.o, f'{tagname}-with-seq-info-n{ntask}-t{ttask:03d}-{rep_chr}.tsv') # df.to_csv(outfn, sep='\t', header=False, index=False) # logger.info(f"save to {outfn}") logger.info(f"Finished of subprocess {ttask}:{ntask}") return df def filter_noncg_sites_for_tombo( tombo_fn='/projects/li-lab/yang/workspace/nano-compare/data/tools-call-data/K562/K562.tombo_perReadsStats.bed', sam_fn='/projects/li-lab/yang/results/12-09/K562.nanopolish/K562.sorted.bam', ntask=1, ttask=1, num_seq=5): if args.i is not None: tombo_fn = args.i df = load_tombo_df(infn=tombo_fn) basefn = os.path.basename(tombo_fn) basename = os.path.splitext(basefn)[0] filter_noncg_sites_ref_seq(df=df, tagname=basename, ntask=ntask, ttask=ttask, num_seq=num_seq) def convert_bismark_add_strand_and_seq(indf, outfn, report_num=None): """ Check start pointer, if point to CG's C, it 
is positive strand, or else, it is reverse strand Note: input file is 1-based start, we also output to a 1-based format that is compatable to our Bismark import functions. :param indf: :param outf: :param report_num: :return: """ logger.debug(f'Start add strand and seq to bismark cov file, total len={len(indf)}') outf = gzip.open(outfn, 'wt') for index, row in tqdm(indf.iterrows()): # if report_num and index % report_num == 0: # logger.debug(f'processed index={index}') chr = row['chr'] start = int(row['start']) # Keep raw 1-based format of bismark results ret = get_dna_base_from_reference(chr, start - 1, ref_fasta=ref_fasta) if ret[5] == 'C': # strand is + strand = '+' elif ret[5] == 'G': strand = '-' else: raise Exception(f'We can not identify this bg-truth file with non-CG results, such as row={row}') outstr = '\t'.join([chr, str(start), strand, str(row['mcount']), str(row['ccount']), ret[4:7]]) outf.write(f'{outstr}\n') outf.close() logger.info(f'save to {outfn}') logger.debug(f'Finish add strand info task') def convert_bismark_cov_to_gw_format(df): """ Save adding strand info and dna seq format, which is in same format of Bismark Genome-wide output files :param df: :return: """ basefn = os.path.basename(args.i) basename = os.path.splitext(basefn)[0] outfn = os.path.join(args.o, f'{basename}.convert.add.strand.tsv.gz') convert_bismark_add_strand_and_seq(df, outfn) def filter_noncg_sites_mpi(df, ntask=300, toolname='tombo'): """ MPI version of filter out non-CG patterns :return: """ basefn = os.path.basename(args.i) basename = os.path.splitext(basefn)[0] all_list = list(range(len(df))) # Store each sub-process return results df_list = [] with Pool(processes=args.processors) as pool: for epoch in range(ntask): cpg_pattern_index = subset_of_list(all_list, ntask, epoch + 1) seldf = df.iloc[cpg_pattern_index, :] if toolname == 'tombo': df_list.append(pool.apply_async(filter_noncg_sites_ref_seq_mpi, (seldf, basename, ntask, epoch + 1))) elif toolname == 'deepmod': 
df_list.append(pool.apply_async(filter_noncg_sites_ref_seq_mpi, (seldf, basename, ntask, epoch + 1), dict(chr_col=0, start_col=1, strand_col=5, toolname='deepmod'))) elif toolname == 'deepmod-read-level': df_list.append(pool.apply_async(filter_noncg_sites_ref_seq_mpi, (seldf, basename, ntask, epoch + 1), dict(chr_col=0, start_col=1, strand_col=5, toolname='deepmod-read-level'))) else: raise Exception(f"{toolname} is no valid.") pool.close() pool.join() # Combine df logger.debug("Start to combine all results") df_list = [df1.get() for df1 in df_list] retdf = pd.concat(df_list) logger.debug(retdf) ## Note: original input=K562.tombo.perReadsStats.combine.tsv ## output=K562.tombo.perReadsStatsOnlyCpG.combine.tsv if toolname == 'tombo': basefn = basefn.replace("perReadsStats", "perReadsStatsOnlyCG").replace("combined", "combine") elif toolname == 'deepmod': ## Example: HL60.deepmod.C.combined.tsv basefn = basefn.replace(".C.", ".C_OnlyCG.").replace("combined", "combine") else: raise Exception(f"{toolname} is no valid.") outfn = os.path.join(args.o, f'{basefn}') retdf.to_csv(outfn, sep='\t', index=False, header=False) logger.debug(f"Save to {outfn}") def filter_noncg_sites_for_deepmod( deepmod_fn='/projects/li-lab/yang/workspace/nano-compare/data/tools-call-data/K562/K562.deepmod_combined.bed', sam_fn='/projects/li-lab/yang/results/12-09/K562.nanopolish/K562.sorted.bam', ntask=1, ttask=1, num_seq=5): if args.i is not None: deepmod_fn = args.i df = load_deepmod_df(infn=deepmod_fn) basefn = os.path.basename(deepmod_fn) basename = os.path.splitext(basefn)[0] filter_noncg_sites_ref_seq(df=df, tagname=basename, ntask=ntask, ttask=ttask, num_seq=num_seq, chr_col=0, start_col=1, strand_col=5, toolname='deepmod') def subset_of_list(alist, n, t): """ Subset of a list for multi-processing n=1 to 100 t=1 to N return subset list of alist :param alist: :param n: :param t: :return: """ if t < 1 or t > n: raise Exception(f't={t} is not accept, must be 1-N (include)') if n > len(alist): 
# if n is bigger than all list, return only 1 for t<=len if t <= len(alist): return [alist[t - 1]] else: return None m = int(len(alist) / n) # each task of a section of list start_index = int((t - 1) * m) if t == n: sublist = alist[start_index:] else: sublist = alist[start_index:start_index + m] # logger.debug(f'n={n}, t={t}, section={m}, index={start_index}:{start_index + m}') return sublist def get_f5_readid_map(flist): f5_readid_map = defaultdict(str) for fn in flist: basename = os.path.basename(fn) with get_fast5_file(fn, mode="r") as f5: # if len(f5.get_reads()) >= 2: # raise Exception(f'We can only deal with one read in fast5, but fn={fn}, contains {len(f5.get_reads())} multiple reads') for read in f5.get_reads(): f5_readid_map[basename] = str(read.read_id) return f5_readid_map def build_map_fast5_to_readid_mp( basedir='/fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-basecall', ntask=300): patfn = os.path.join(basedir, '**', '*.fast5') fast5_flist = glob.glob(patfn, recursive=True) logger.info(f'Total fast5 files: {len(fast5_flist)}') ret_list = [] with Pool(processes=args.processors) as pool: for epoch in range(ntask): subflist = subset_of_list(fast5_flist, ntask, epoch + 1) ret_list.append(pool.apply_async(get_f5_readid_map, (subflist,))) pool.close() pool.join() logger.debug('Finish fast5 to read-id mapping') f5_readid_map = defaultdict(str) for ret in ret_list: f5_readid_map.update(ret.get()) # for fn in fast5_flist[:]: # # logger.debug(fn) # basename = os.path.basename(fn) # # with get_fast5_file(fn, mode="r") as f5: # for read in f5.get_reads(): # # logger.debug(read.read_id) # f5_readid_map[basename] = str(read.read_id) return f5_readid_map def process_pred_detail_f5file(fn, f5_readid_map): """ For each deepmod prediction results file, we analyze a df result of read-level results :param fn: :param f5_readid_map: :return: """ f5_pred_key = '/pred/pred_0/predetail' dflist = [] with h5py.File(fn, 'r') as mr: # m_pred = 
mr[f5_pred_key].value # logger.debug(m_pred) for name in mr['/pred']: # logger.debug(name) pred_num_key = f'/pred/{name}' f5file = os.path.basename(mr[pred_num_key].attrs['f5file']) mapped_chr = mr[pred_num_key].attrs['mapped_chr'] mapped_strand = mr[pred_num_key].attrs['mapped_strand'] # logger.debug(f'{pred_num_key}: chr={mapped_chr}, strand={mapped_strand}, f5file={f5file}') pred_detail_key = f'{pred_num_key}/predetail' # m_pred = mr[pred_detail_key].value m_pred = mr[pred_detail_key][()] m_pred = np.array(m_pred, dtype=[('refbase', 'U1'), ('readbase', 'U1'), ('refbasei', np.uint64), ('readbasei', np.uint64), ('mod_pred', np.int)]) dataset = [] for mi in range(len(m_pred)): if m_pred['refbase'][mi] not in ['C']: continue if m_pred['refbase'][mi] in ['-', 'N', 'n']: continue # if m_pred['readbase'][mi] == '-': # continue # Filter non-CG patterns results ret = get_dna_base_from_reference(mapped_chr, int(m_pred['refbasei'][mi]), ref_fasta=ref_fasta) if mapped_strand == '+': if ret[5:7] != 'CG': continue elif mapped_strand == '-': if ret[4:6] != 'CG': continue if -0.1 < m_pred['mod_pred'][mi] - 1 < 0.1: meth_indicator = 1 else: meth_indicator = 0 # sp_options['4NA'][m_pred['refbase'][mi]][(cur_chr, cur_strand, int(m_pred['refbasei'][mi]) )][0] += 1 ret = {'start': int(m_pred['refbasei'][mi]), 'pred': meth_indicator, 'base': m_pred['refbase'][mi], 'sequence': ret} dataset.append(ret) df = pd.DataFrame(dataset) if len(df) < 1: continue df['chr'] = str(mapped_chr) df['end'] = df['start'] + 1 df['strand'] = str(mapped_strand) df['read-id'] = f5_readid_map[f5file] df = df[['chr', 'start', 'end', 'read-id', 'base', 'strand', 'sequence', 'pred']] # logger.info(df) dflist.append(df) sumdf = pd.concat(dflist) # logger.debug(f'Process pred detail file {fn} finished, total reads={len(sumdf)}.') return sumdf def extract_deepmod_read_level_results_mp( basecallDir='/fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-basecall', 
methcallDir='/fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-methcall', ntask=50): f5_readid_map = build_map_fast5_to_readid_mp(basedir=basecallDir, ntask=ntask) # logger.debug(f5_readid_map) # dirname = '/fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-methcall/**/rnn.pred.detail.fast5.*' dirname = os.path.join(methcallDir, '**', 'rnn.pred.detail.fast5.*') fast5_flist = glob.glob(dirname, recursive=True) logger.info(f'Total deepmod fast5 files:{len(fast5_flist)}') dflist = [] with Pool(processes=args.processors) as pool: for fn in fast5_flist[:]: # df = process_pred_detail_f5file(fn, f5_readid_map) dflist.append(pool.apply_async(process_pred_detail_f5file, (fn, f5_readid_map,))) # logger.debug(df) # logger.debug(df.iloc[1, :]) # logger.debug(fn) # pass pool.close() pool.join() dflist = [df.get() for df in dflist] sumdf = pd.concat(dflist) logger.debug('Finish get df from deepmod fast5 predetail files') cpgDict = defaultdict(lambda: [0, 0]) # 0:cov, 1:meth-cov for index, row in sumdf.iterrows(): chr = row['chr'] start = row['start'] strand = row['strand'] basekey = (chr, start, strand) cpgDict[basekey][0] += 1 if row['pred'] == 1: cpgDict[basekey][1] += 1 logger.debug(f'CpG sites={len(cpgDict)}') dataset = [] for site in cpgDict: ret = {'chr': site[0], 'start': site[1], 'end': site[1] + 1, 'base': 'C', 'cap-cov': cpgDict[site][0], 'strand': site[2], 'no-use1': '', 'start1': site[1], 'end1': site[1] + 1, 'no-use2': '0,0,0', 'cov': cpgDict[site][0], 'meth-freq': int(100 * cpgDict[site][1] / cpgDict[site][0]), 'meth-cov': cpgDict[site][1]} dataset.append(ret) beddf = pd.DataFrame(dataset) beddf = beddf[ ['chr', 'start', 'end', 'base', 'cap-cov', 'strand', 'no-use1', 'start1', 'end1', 'no-use2', 'cov', 'meth-freq', 'meth-cov']] logger.debug('Finish bed df, extract all DONE.') return sumdf, beddf def parse_arguments(): """ :return: """ parser = argparse.ArgumentParser(description='Multi-task') 
parser.add_argument("cmd", help="name of command: compute, combine, or gen-pixel-info") parser.add_argument('-n', type=int, help="the total number of tasks (1-27)", default=1) parser.add_argument('-t', type=int, help="the current task id (1-N)", default=1) parser.add_argument('-i', type=str, help="input file", default=None) parser.add_argument('-o', type=str, help="output dir or file", default=pic_base_dir) parser.add_argument('--o2', type=str, help="second output dir or file", default=None) parser.add_argument('--ibam', type=str, help="input bam/sam file", default=None) parser.add_argument('--basecallDir', type=str, help="basecallDir dir name", default=None) parser.add_argument('--methcallDir', type=str, help="methcallDir dir name", default=None) parser.add_argument('--processors', type=int, help="Number of processors", default=8) parser.add_argument('--mpi', action='store_true') parser.add_argument('--chrs', nargs='+', help='all chrs need to check', default=[]) return parser.parse_args() def output_bed_by_bin(bin_id): num_bins = 5 density_col = 4 output_cols = [0, 1, 2] bin_value = int(bin_id / num_bins * 100 + 1e-5) logger.info(f"start with bin_id={bin_id}, bin_value={bin_value}") ndf = df[df[density_col] == bin_value] ndf = ndf.iloc[:, output_cols] logger.info(f"start to save, df={len(df):,}, ndf={len(ndf):,}, for bin_value={bin_value}") outfn = os.path.join(args.o, f"hg38.gc5Base.bin{bin_value}.bed.gz") ndf.to_csv(outfn, sep='\t', header=False, index=False) logger.info(f"save to {outfn}") def output_bed_by_bin2(infn, num_bins): inf = gzip.open(infn, 'rt') outf_list = [] for bin_id in range(0, num_bins + 1): bin_value = int(bin_id / num_bins * 100 + 1e-5) outf_list.append(gzip.open(os.path.join(args.o, f"hg38.gc5Base.bin{bin_value}.bed.gz"), 'wt')) for row in tqdm(inf): tmp = row.strip().split("\t") density_col = 4 bin_value = int(float(tmp[density_col]) + 1e-5) bin_id = bin_value // 20 if bin_id not in range(0, num_bins + 1): logger.error(f"Error found: 
bin_value={bin_value}, bin_id={bin_id}, for row={row}") raise Exception(f"Error found: bin_value={bin_value}, bin_id={bin_id}, for row={row}") outf_list[bin_id].write(f"{tmp[0]}\t{tmp[1]}\t{tmp[2]}\n") [outf.close for outf in outf_list] logger.info("Finished bin bed for gc density") def save_tss_bed_for_5hmc(infn, outfn): logger.info(f"open infn={infn}") df = pd.read_csv(infn, sep='\t', header=None) logger.debug(df) df = df.iloc[:, [0, 1, 2, 4, 7]] df.columns = ['chr', 'start', 'end', '5hmc_level', 'strand'] df['n1'] = '.' df['start'] = df['start'].astype(int) - 1 df['end'] = df['end'].astype(int) - 1 df['5hmc_level'] = df['5hmc_level'].astype(float) df = df[['chr', 'start', 'end', '5hmc_level', 'n1', 'strand']] logger.info(f"df['5hmc_level'] = {df['5hmc_level'].describe()}") logger.info(f"len(df['5hmc_level'] >= 1.0) = {(df.loc[:, '5hmc_level'] >= 1.0 - 1e-3).sum()}") df.to_csv(outfn, sep='\t', header=False, index=False) logger.info(f"save to {outfn}") pass if __name__ == '__main__': set_log_debug_level() args = parse_arguments() logger.debug(args) ref_fasta = None if args.cmd in ['tombo-add-seq', 'deepmod-add-seq', 'deepmod-read-level', 'sanity-check-seq', 'bismark-convert']: # These command will use reference genome ref_fn = '/projects/li-lab/Ziwei/Nanopore/data/reference/hg38.fa' ref_fasta = SeqIO.to_dict(SeqIO.parse(open(ref_fn), 'fasta')) if args.cmd == 'tombo-add-seq': if args.mpi: logger.debug('in mpi mode') import multiprocessing logger.debug( "There are %d CPUs on this machine by multiprocessing.cpu_count()" % multiprocessing.cpu_count()) df = load_tombo_df(infn=args.i) filter_noncg_sites_mpi(df) else: filter_noncg_sites_for_tombo(ntask=args.n, ttask=args.t) elif args.cmd == 'deepmod-add-seq': if args.mpi: logger.debug('in mpi mode') import multiprocessing logger.debug( "There are %d CPUs on this machine by multiprocessing.cpu_count()" % multiprocessing.cpu_count()) df = load_deepmod_df(infn=args.i) filter_noncg_sites_mpi(df, toolname='deepmod') else: 
filter_noncg_sites_for_deepmod(ntask=args.n, ttask=args.t) elif args.cmd == 'nanopolish-add-strand': add_strand_info_for_nanopolish() elif args.cmd == 'sanity-check-seq': ## bash meth_stats_tool.sh sanity-check-seq --chrs chr4:10164 chr4:10298 for chrstr in args.chrs: # logger.info(chrstr) sanity_check_get_dna_seq(chrstr) elif args.cmd == 'deepmod-read-level': ### Running bash: """ sbatch meth_stats_tool_mpi.sh deepmod-read-level --basecallDir /fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-basecall --methcallDir /fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-methcall -o /fastscratch/liuya/nanocompare/deepmod-read-level1.tsv --o2 /fastscratch/liuya/nanocompare/deepmod-read-level1-extract-output.bed """ sumdf, beddf = extract_deepmod_read_level_results_mp(basecallDir=args.basecallDir, methcallDir=args.methcallDir) logger.info(sumdf) logger.info(sumdf.iloc[1, :]) logger.info(sumdf['chr'].unique()) # outfn = os.path.join('/fastscratch/liuya/nanocompare/', 'deepmod-read-level.tsv') # Save read level results outfn = args.o sumdf.to_csv(outfn, sep='\t', index=False, header=False) logger.info(f'save to {outfn}') if args.o2: # Save CpG base level results bed file for cluster module use outfn = args.o2 beddf.to_csv(outfn, sep=' ', index=False, header=False) logger.info(f'save to {outfn}') elif args.cmd == 'bismark-convert': # Convert non-strand info bismark to strand ## bash meth_stats_tool.sh bismark-convert -i /pod/2/li-lab/Ziwei/Nanopore_methyl_compare/result/APL_BSseq/APL-bs_R1_val_1_bismark_bt2_pe.deduplicated.sorted.bed ## sbatch meth_stats_tool.sh bismark-convert -i /pod/2/li-lab/Ziwei/Nanopore_methyl_compare/result/APL_BSseq/APL-bs_R1_val_1_bismark_bt2_pe.deduplicated.sorted.bed df = pd.read_csv(args.i, sep='\t', header=None) if len(df.columns) != 6: raise Exception(f"Can no recognize input file format for infn={args.i}, df={df}") df.columns = ['chr', 'start', 'end', 'freq100', 'mcount', 'ccount'] 
logger.debug(df) convert_bismark_cov_to_gw_format(df) elif args.cmd == 'gc-density-bed': # sbatch meth_stats_tool.sh gc-density-bed infn = "/projects/li-lab/yang/workspace/nano-compare/data/genome-annotation/hg38.gc5Base.bed.gz" output_bed_by_bin2(infn, num_bins=5) if True: sys.exit(0) df = pd.read_csv(infn, sep='\t', header=None) df.iloc[:, 4] = df.iloc[:, 4].astype(int) logger.debug(df) bin_list = list(range(1, 6)) os.makedirs(args.o, exist_ok=True) with Pool(processes=args.processors) as pool: pool.map(output_bed_by_bin, bin_list) elif args.cmd == 'repetitive-bed': # sbatch meth_stats_tool.sh repetitive-bed # bash meth_stats_tool.sh repetitive-bed infn = "/projects/li-lab/yang/results/2021-07-01/hg38.repetitive.bed.gz" df = pd.read_csv(infn, sep='\t') df = df[df['genoName'].isin(humanChrSet)] df['n1'] = '.' df['n2'] = '.' logger.info(df) outfn = f"hg38.repetitive.rep_All.bed.gz" df[['genoName', 'genoStart', 'genoEnd', 'n1', 'n2', 'strand']].to_csv(os.path.join(args.o, outfn), sep='\t', header=False, index=False) region_dict = { "LINE": ["LINE"], "SINE": ["SINE"], "LTR": ["LTR"], "DNA": ["DNA"] } used_list = [] for key in region_dict: logger.info(f"seperate {key}") used_list += region_dict[key] ndf = df[df['repClass'].isin(region_dict[key])] ndf = ndf[['genoName', 'genoStart', 'genoEnd', 'n1', 'n2', 'strand']] # logger.info(ndf) outfn = f"hg38.repetitive.rep_{key}.bed.gz" ndf.to_csv(os.path.join(args.o, outfn), sep='\t', header=False, index=False) logger.info(f"len={len(ndf)}, save to {outfn}") ## Output others ndf = df[~df['repClass'].isin(used_list)] ndf = ndf[['genoName', 'genoStart', 'genoEnd', 'n1', 'n2', 'strand']] # logger.info(ndf) outfn = f"hg38.repetitive.rep_Others.bed.gz" ndf.to_csv(os.path.join(args.o, outfn), sep='\t', header=False, index=False) logger.info(f"len={len(ndf)}, save to {outfn}") elif args.cmd == 'apl-5hmc-bed': # Extract TSS format BED file for 5hmC # convert 1-based to 0-based results, output 5hmc level # bash meth_stats_tool.sh 
apl-5hmc-bed # file will be later converted into BW file infn = "/pod/2/li-lab/Nanopore_compare/data/APL_5hmC_BSseq/APL.cov5.mlml.addstrand.selected.bed.gz" outfn = os.path.join(args.o, "APL.5hmc.tss.cov5.bed.gz") save_tss_bed_for_5hmc(infn, outfn) infn = "/pod/2/li-lab/Nanopore_compare/data/APL_5hmC_BSseq/APL.mlml.addstrand.selected.bed.gz" outfn = os.path.join(args.o, "APL.5hmc.tss.cov1.bed.gz") save_tss_bed_for_5hmc(infn, outfn) pass elif args.cmd == 'merge-basecall-summary': ## sbatch meth_stats_tool.sh merge-basecall-summary -i /projects/li-lab/yang/results/2021-07-17/NA12878_basecall_logs_output baseDir = args.i flist = glob.glob(os.path.join(baseDir, '**', '*sequencing_summary.txt')) logger.info(flist) logger.info(len(flist)) dflist = [] for fn in flist: df = pd.read_csv(fn, sep='\t') dflist.append(df) dfall = pd.concat(dflist) outfn = os.path.join(args.o, 'NA12878-allChrs-basecall.sequencing_summary.txt') dfall.to_csv(outfn, sep='\t', index=False) logger.info(f"save to {outfn}") else: raise Exception(f"Not support command={args.cmd}") logger.info("meth_stats_tool DONE")
import logging

from aiohttp import web

from ledfx.api import RestEndpoint
from ledfx.config import save_config
from ledfx.events import Event

_LOGGER = logging.getLogger(__name__)


class QLCEndpoint(RestEndpoint):
    """REST end-point for querying and managing a QLC integration"""

    ENDPOINT_PATH = "/api/integrations/qlc/{integration_id}"

    async def get(self, integration_id, request) -> web.Response:
        """Get info from QLC+ integration"""
        integration = self._ledfx.integrations.get(integration_id)
        if (integration is None) or (integration.type != "qlc"):
            response = {"not found": 404}
            return web.json_response(data=response, status=404)

        data = await request.json()
        info = data.get("info")

        if info is None:
            response = {
                "status": "failed",
                "reason": 'Required attribute "info" was not provided',
            }
            return web.json_response(data=response, status=500)

        if info == "event_types":
            # generate dict of {effect_id: effect_name}
            effect_names = []
            for effect_type, effect in self._ledfx.effects.classes().items():
                effect_names.append(effect.NAME)

            scene_names = []
            for scene in self._ledfx.config["scenes"]:
                scene_names.append(self._ledfx.config["scenes"][scene]["name"])

            response = {
                Event.EFFECT_SET: {
                    "event_name": "Effect Set",
                    "event_filters": {"effect_name": effect_names},
                },
                Event.EFFECT_CLEARED: {
                    "event_name": "Effect Cleared",
                    "event_filters": {},
                },
                Event.SCENE_SET: {
                    "event_name": "Scene Set",
                    "event_filters": {"scene_name": scene_names},
                },
            }
        elif info == "qlc_widgets":
            response = await integration.get_widgets()
        elif info == "qlc_listeners":
            response = integration.data
        else:
            response = {
                "status": "failed",
                "reason": f'Unknown info parameter "{info}"',
            }
            return web.json_response(data=response, status=500)

        return web.json_response(data=response, status=200)

    async def put(self, integration_id, request) -> web.Response:
        """Toggle a QLC event listener"""
        integration = self._ledfx.integrations.get(integration_id)
        if (integration is None) or (integration.type != "qlc"):
            response = {"not found": 404}
            return web.json_response(data=response, status=404)

        data = await request.json()
        event_type = data.get("event_type")
        event_filter = data.get("event_filter")

        if event_type is None:
            response = {
                "status": "failed",
                "reason": 'Required attribute "event_type" was not provided',
            }
            return web.json_response(data=response, status=500)

        if event_filter is None:
            response = {
                "status": "failed",
                "reason": 'Required attribute "event_filter" was not provided',
            }
            return web.json_response(data=response, status=500)

        if type(event_filter) is not dict:
            # BUGFIX: the original closed the f-string right after
            # {event_filter}, leaving the rest of the message outside any
            # string literal (a SyntaxError). It is now one valid f-string;
            # literal braces are escaped as {{ }}.
            response = {
                "status": "failed",
                "reason": f'Invalid filter "{event_filter}", should be dictionary eg. {{ "scene_name" : "my scene" }} ',
            }
            return web.json_response(data=response, status=500)

        # toggle the event listener
        if not integration.toggle_event(event_type, event_filter):
            response = {
                "status": "failed",
                "reason": f"Could not find event with type {event_type} and filter {event_filter}",
            }
            return web.json_response(data=response, status=500)

        # Save the configuration (integration will handle modifying "data")
        for _integration in self._ledfx.config["integrations"]:
            if _integration["id"] == integration_id:
                _integration["data"] = integration.data
                break
        save_config(
            config=self._ledfx.config,
            config_dir=self._ledfx.config_dir,
        )

        response = {"status": "success"}
        return web.json_response(data=response, status=200)

    async def post(self, integration_id, request) -> web.Response:
        """Add a new QLC event listener or update an existing one"""
        integration = self._ledfx.integrations.get(integration_id)
        if (integration is None) or (integration.type != "qlc"):
            response = {"not found": 404}
            return web.json_response(data=response, status=404)

        data = await request.json()
        event_type = data.get("event_type")
        event_filter = data.get("event_filter")
        qlc_payload = data.get("qlc_payload")

        if event_type is None:
            response = {
                "status": "failed",
                "reason": 'Required attribute "event_type" was not provided',
            }
            return web.json_response(data=response, status=500)

        if event_filter is None:
            response = {
                "status": "failed",
                "reason": 'Required attribute "event_filter" was not provided',
            }
            return web.json_response(data=response, status=500)

        if type(event_filter) is not dict:
            # BUGFIX: same prematurely-terminated f-string as in put(); see there.
            response = {
                "status": "failed",
                "reason": f'Invalid filter "{event_filter}", should be dictionary eg. {{ "scene_name" : "my scene" }} ',
            }
            return web.json_response(data=response, status=500)

        if qlc_payload is None:
            response = {
                "status": "failed",
                "reason": 'Required attribute "qlc_payload" was not provided',
            }
            return web.json_response(data=response, status=500)

        # Create a link between ledfx event and sending the payload
        integration.create_event(event_type, event_filter, True, qlc_payload)

        # Update and save the configuration
        for _integration in self._ledfx.config["integrations"]:
            if _integration["id"] == integration_id:
                _integration["data"] = integration.data
                break
        save_config(
            config=self._ledfx.config,
            config_dir=self._ledfx.config_dir,
        )

        response = {"status": "success"}
        return web.json_response(data=response, status=200)

    async def delete(self, integration_id, request) -> web.Response:
        """Delete a QLC event listener"""
        integration = self._ledfx.integrations.get(integration_id)
        if (integration is None) or (integration.type != "qlc"):
            response = {"not found": 404}
            return web.json_response(data=response, status=404)

        data = await request.json()
        event_type = data.get("event_type")
        event_filter = data.get("event_filter")

        if event_type is None:
            response = {
                "status": "failed",
                "reason": 'Required attribute "event_type" was not provided',
            }
            return web.json_response(data=response, status=500)

        if event_filter is None:
            response = {
                "status": "failed",
                "reason": 'Required attribute "event_filter" was not provided',
            }
            return web.json_response(data=response, status=500)

        if type(event_filter) is not dict:
            # BUGFIX: same prematurely-terminated f-string as in put(); see there.
            response = {
                "status": "failed",
                "reason": f'Invalid filter "{event_filter}", should be dictionary eg. {{ "scene_name" : "my scene" }} ',
            }
            return web.json_response(data=response, status=500)

        # Delete the listener and event from data
        integration.delete_event(event_type, event_filter)

        # Save the configuration (integration will handle modifying "data")
        for _integration in self._ledfx.config["integrations"]:
            if _integration["id"] == integration_id:
                _integration["data"] = integration.data
                break
        save_config(
            config=self._ledfx.config,
            config_dir=self._ledfx.config_dir,
        )

        response = {"status": "success"}
        return web.json_response(data=response, status=200)
import logging

from aiohttp import web

from ledfx.api import RestEndpoint
from ledfx.config import save_config
from ledfx.events import Event

_LOGGER = logging.getLogger(__name__)


class QLCEndpoint(RestEndpoint):
    """REST end-point for querying and managing a QLC integration"""

    ENDPOINT_PATH = "/api/integrations/qlc/{integration_id}"

    @staticmethod
    def _failed(reason):
        # Uniform 500 "failed" payload used by every validation branch.
        return web.json_response(
            data={"status": "failed", "reason": reason}, status=500
        )

    @staticmethod
    def _not_found():
        # The id does not resolve to a QLC integration.
        return web.json_response(data={"not found": 404}, status=404)

    @classmethod
    def _check_listener_args(cls, event_type, event_filter):
        """Return an error response for bad listener arguments, else None."""
        if event_type is None:
            return cls._failed('Required attribute "event_type" was not provided')
        if event_filter is None:
            return cls._failed('Required attribute "event_filter" was not provided')
        # exact type check on purpose: dict subclasses are rejected, as before
        if type(event_filter) is not dict:
            return cls._failed(
                f'Invalid filter "{event_filter}", should be dictionary eg. '
                '{ "scene_name" : "my scene" } '
            )
        return None

    def _qlc_integration(self, integration_id):
        """Resolve integration_id to a QLC integration, or None."""
        integration = self._ledfx.integrations.get(integration_id)
        if integration is None or integration.type != "qlc":
            return None
        return integration

    def _save_integration_data(self, integration_id, integration):
        """Mirror integration.data into the stored config entry, then persist."""
        for entry in self._ledfx.config["integrations"]:
            if entry["id"] == integration_id:
                entry["data"] = integration.data
                break
        save_config(
            config=self._ledfx.config,
            config_dir=self._ledfx.config_dir,
        )

    async def get(self, integration_id, request) -> web.Response:
        """Get info from QLC+ integration"""
        integration = self._qlc_integration(integration_id)
        if integration is None:
            return self._not_found()

        data = await request.json()
        info = data.get("info")
        if info is None:
            return self._failed('Required attribute "info" was not provided')

        if info == "event_types":
            # names of every registered effect and every configured scene
            effect_names = [
                effect.NAME
                for _effect_type, effect in self._ledfx.effects.classes().items()
            ]
            scenes = self._ledfx.config["scenes"]
            scene_names = [scenes[scene]["name"] for scene in scenes]
            response = {
                Event.EFFECT_SET: {
                    "event_name": "Effect Set",
                    "event_filters": {"effect_name": effect_names},
                },
                Event.EFFECT_CLEARED: {
                    "event_name": "Effect Cleared",
                    "event_filters": {},
                },
                Event.SCENE_SET: {
                    "event_name": "Scene Set",
                    "event_filters": {"scene_name": scene_names},
                },
            }
        elif info == "qlc_widgets":
            response = await integration.get_widgets()
        elif info == "qlc_listeners":
            response = integration.data
        else:
            return self._failed(f'Unknown info parameter "{info}"')

        return web.json_response(data=response, status=200)

    async def put(self, integration_id, request) -> web.Response:
        """Toggle a QLC event listener"""
        integration = self._qlc_integration(integration_id)
        if integration is None:
            return self._not_found()

        data = await request.json()
        event_type = data.get("event_type")
        event_filter = data.get("event_filter")

        error = self._check_listener_args(event_type, event_filter)
        if error is not None:
            return error

        # flip the listener on/off; False means no such listener exists
        if not integration.toggle_event(event_type, event_filter):
            return self._failed(
                f"Could not find event with type {event_type} and filter {event_filter}"
            )

        self._save_integration_data(integration_id, integration)
        return web.json_response(data={"status": "success"}, status=200)

    async def post(self, integration_id, request) -> web.Response:
        """Add a new QLC event listener or update an existing one"""
        integration = self._qlc_integration(integration_id)
        if integration is None:
            return self._not_found()

        data = await request.json()
        event_type = data.get("event_type")
        event_filter = data.get("event_filter")
        qlc_payload = data.get("qlc_payload")

        error = self._check_listener_args(event_type, event_filter)
        if error is not None:
            return error
        if qlc_payload is None:
            return self._failed('Required attribute "qlc_payload" was not provided')

        # Link the ledfx event to sending the QLC+ payload (enabled on creation)
        integration.create_event(event_type, event_filter, True, qlc_payload)

        self._save_integration_data(integration_id, integration)
        return web.json_response(data={"status": "success"}, status=200)

    async def delete(self, integration_id, request) -> web.Response:
        """Delete a QLC event listener"""
        integration = self._qlc_integration(integration_id)
        if integration is None:
            return self._not_found()

        data = await request.json()
        event_type = data.get("event_type")
        event_filter = data.get("event_filter")

        error = self._check_listener_args(event_type, event_filter)
        if error is not None:
            return error

        # Drop both the listener and its event from the integration's data
        integration.delete_event(event_type, event_filter)

        self._save_integration_data(integration_id, integration)
        return web.json_response(data={"status": "success"}, status=200)
import json
import logging
import re
from datetime import datetime
from random import random
from typing import List, NamedTuple, Optional
from urllib.parse import urlparse

import requests

from streamlink.exceptions import NoStreamsError, PluginError
from streamlink.plugin import Plugin, PluginArgument, PluginArguments, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.hls import HLSStream, HLSStreamReader, HLSStreamWorker, HLSStreamWriter
from streamlink.stream.hls_playlist import ByteRange, ExtInf, Key, M3U8, M3U8Parser, Map, load as load_hls_playlist
from streamlink.stream.http import HTTPStream
from streamlink.utils.args import keyvalue
from streamlink.utils.parse import parse_json, parse_qsd
from streamlink.utils.times import hours_minutes_seconds
from streamlink.utils.url import update_qsd

# browser_cookie3 is optional: only needed for the --twitch-chrome-oauth
# feature; everything else works without it.
try:
    import browser_cookie3 as browser_cookie
except ImportError:
    browser_cookie = None

log = logging.getLogger(__name__)

# Upper bound forced on --hls-live-edge when low-latency mode is enabled.
LOW_LATENCY_MAX_LIVE_EDGE = 2


class TwitchSegment(NamedTuple):
    """A single HLS media segment, extended with Twitch-specific flags."""
    uri: str
    duration: float
    title: Optional[str]
    key: Optional[Key]
    discontinuity: bool
    byterange: Optional[ByteRange]
    date: Optional[datetime]
    map: Optional[Map]
    ad: bool        # segment falls inside a stitched-ad daterange
    prefetch: bool  # segment came from an #EXT-X-TWITCH-PREFETCH tag


# generic namedtuples are unsupported, so just subclass
class TwitchSequence(NamedTuple):
    num: int
    segment: TwitchSegment


class TwitchM3U8(M3U8):
    """M3U8 playlist that additionally tracks ad-marking dateranges."""
    segments: List[TwitchSegment]

    def __init__(self):
        super().__init__()
        # dateranges identified as stitched ads by the parser below
        self.dateranges_ads = []


class TwitchM3U8Parser(M3U8Parser):
    m3u8: TwitchM3U8

    def parse_tag_ext_x_twitch_prefetch(self, value):
        # Turn a prefetch tag into a pseudo-segment cloned from the last
        # real segment (same key/map/etc.), marked prefetch=True.
        segments = self.m3u8.segments
        if not segments:  # pragma: no cover
            return
        last = segments[-1]
        # Use the average duration of all regular segments for the duration of prefetch segments.
        # This is better than using the duration of the last segment when regular segment durations vary a lot.
        # In low latency mode, the playlist reload time is the duration of the last segment.
        duration = (
            last.duration
            if last.prefetch
            else sum(segment.duration for segment in segments) / float(len(segments))
        )
        segments.append(last._replace(uri=self.uri(value), duration=duration, prefetch=True))

    def parse_tag_ext_x_daterange(self, value):
        """Record dateranges that mark stitched-in advertisement content."""
        super().parse_tag_ext_x_daterange(value)
        daterange = self.m3u8.dateranges[-1]
        # Twitch marks ads via a dedicated class name, an id prefix, or
        # X-TV-TWITCH-AD-* custom attributes — any one of them counts.
        is_ad = (
            daterange.classname == "twitch-stitched-ad"
            or str(daterange.id or "").startswith("stitched-ad-")
            or any(attr_key.startswith("X-TV-TWITCH-AD-") for attr_key in daterange.x.keys())
        )
        if is_ad:
            self.m3u8.dateranges_ads.append(daterange)

    def get_segment(self, uri: str) -> TwitchSegment:
        """Build a TwitchSegment from the parser state, tagging ad segments."""
        extinf: ExtInf = self.state.pop("extinf", None) or ExtInf(0, None)
        date = self.state.pop("date", None)
        # A segment is an ad if its program date falls in any ad daterange.
        ad = any(self.m3u8.is_date_in_daterange(date, daterange) for daterange in self.m3u8.dateranges_ads)
        return TwitchSegment(
            uri=uri,
            duration=extinf.duration,
            title=extinf.title,
            key=self.state.get("key"),
            discontinuity=self.state.pop("discontinuity", False),
            byterange=self.state.pop("byterange", None),
            date=date,
            map=self.state.get("map"),
            ad=ad,
            prefetch=False,
        )


class TwitchHLSStreamWorker(HLSStreamWorker):
    def __init__(self, reader, *args, **kwargs):
        # whether a non-ad (real content) segment has been seen yet
        self.had_content = False
        super().__init__(reader, *args, **kwargs)

    def _reload_playlist(self, *args):
        # use the Twitch-aware parser/playlist classes
        return load_hls_playlist(*args, parser=TwitchM3U8Parser, m3u8=TwitchM3U8)

    def _playlist_reload_time(self, playlist: TwitchM3U8, sequences: List[TwitchSequence]):
        # In low-latency mode, reload as soon as the newest segment elapses.
        if self.stream.low_latency and sequences:
            return sequences[-1].segment.duration
        return super()._playlist_reload_time(playlist, sequences)

    def process_sequences(self, playlist: TwitchM3U8, sequences: List[TwitchSequence]):
        # ignore prefetch segments if not LL streaming
        if not self.stream.low_latency:
            sequences = [seq for seq in sequences if not seq.segment.prefetch]
        # check for sequences with real content
        if not self.had_content:
            self.had_content = next((True for seq in sequences if not seq.segment.ad), False)
        # When filtering ads, to check
        # whether it's a LL stream, we need to wait for the real content to show up,
        # since playlists with only ad segments don't contain prefetch segments
        if (
            self.stream.low_latency
            and self.had_content
            and not next((True for seq in sequences if seq.segment.prefetch), False)
        ):
            log.info("This is not a low latency stream")
        # show pre-roll ads message only on the first playlist containing ads
        if self.stream.disable_ads and self.playlist_sequence == -1 and not self.had_content:
            log.info("Waiting for pre-roll ads to finish, be patient")
        return super().process_sequences(playlist, sequences)


class TwitchHLSStreamWriter(HLSStreamWriter):
    def should_filter_sequence(self, sequence: TwitchSequence):
        # drop ad segments entirely when --twitch-disable-ads is set
        return self.stream.disable_ads and sequence.segment.ad


class TwitchHLSStreamReader(HLSStreamReader):
    __worker__ = TwitchHLSStreamWorker
    __writer__ = TwitchHLSStreamWriter

    def __init__(self, stream):
        if stream.disable_ads:
            log.info("Will skip ad segments")
        if stream.low_latency:
            # clamp the live edge to at most LOW_LATENCY_MAX_LIVE_EDGE
            # (but never below 1) and enable streaming of partial segments
            live_edge = max(1, min(LOW_LATENCY_MAX_LIVE_EDGE, stream.session.options.get("hls-live-edge")))
            stream.session.options.set("hls-live-edge", live_edge)
            stream.session.options.set("hls-segment-stream-data", True)
            log.info(f"Low latency streaming (HLS live edge: {live_edge})")
        super().__init__(stream)


class TwitchHLSStream(HLSStream):
    __reader__ = TwitchHLSStreamReader

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # cache the plugin options that the worker/writer consult per segment
        self.disable_ads = self.session.get_plugin_option("twitch", "disable-ads")
        self.low_latency = self.session.get_plugin_option("twitch", "low-latency")


class UsherService:
    """Builds signed playlist URLs against Twitch's "usher" endpoint."""

    def __init__(self, session):
        self.session = session
        self.purple_adblock = self.session.get_plugin_option("twitch", "purple-adblock")

    def _create_url(self, endpoint, **extra_params):
        # NOTE(review): jupter.ga is a third-party ad-block proxy the
        # "purple-adblock" option routes through — verify it is still
        # operated/trusted before shipping.
        if self.purple_adblock:
            url = f"https://jupter.ga{endpoint}"
        else:
            url = f"https://usher.ttvnw.net{endpoint}"
        params = {
            "player": "twitchweb",
            "p": int(random() * 999999),
            "type": "any",
            "allow_source": "true",
            "allow_audio_only":
"true", "allow_spectre": "false", } params.update(extra_params) req = requests.Request("GET", url, params=params) req = self.session.http.prepare_request(req) log.info(f"Params on usher m3u8 request: {params}") return req.url def channel(self, channel, **extra_params): try: extra_params_debug = validate.Schema( validate.get("token"), validate.parse_json(), {"adblock": bool, "geoblock_reason": str, "hide_ads": bool, "server_ads": bool, "show_ads": bool}, ).validate(extra_params) log.debug(f"{extra_params_debug!r}") except PluginError: pass if self.purple_adblock: return self._create_url(f"/channel/{channel}", **extra_params) return self._create_url(f"/api/channel/hls/{channel}.m3u8", **extra_params) def video(self, video_id, **extra_params): return self._create_url(f"/vod/{video_id}", **extra_params) class TwitchAPI: def __init__(self, session): self.session = session self.headers = { "Client-ID": "kimne78kx3ncx6brgo4mv6wki5h1ko", } if session.get_plugin_option("twitch", "chrome-oauth") and browser_cookie: oauth_token = next( (co.value for co in browser_cookie.chrome(domain_name=".twitch.tv") if co.name == "auth-token"), None ) if oauth_token: self.headers.update({"Authorization": f"OAuth {oauth_token}"}) self.headers.update(**{k: v for k, v in session.get_plugin_option("twitch", "api-header") or []}) def call(self, data, schema=None): res = self.session.http.post("https://gql.twitch.tv/gql", data=json.dumps(data), headers=self.headers) return self.session.http.json(res, schema=schema) @staticmethod def _gql_persisted_query(operationname, sha256hash, **variables): return { "operationName": operationname, "extensions": {"persistedQuery": {"version": 1, "sha256Hash": sha256hash}}, "variables": dict(**variables), } @staticmethod def parse_token(tokenstr): return parse_json( tokenstr, schema=validate.Schema( { "chansub": { "restricted_bitrates": validate.all( [str], validate.filter(lambda n: not re.match(r"(.+_)?archives|live|chunked", n)) ) } }, 
validate.get(("chansub", "restricted_bitrates")), ), ) # GraphQL API calls def metadata_video(self, video_id): query = self._gql_persisted_query( "VideoMetadata", "cb3b1eb2f2d2b2f65b8389ba446ec521d76c3aa44f5424a1b1d235fe21eb4806", channelLogin="", # parameter can be empty videoID=video_id, ) return self.call( query, schema=validate.Schema( { "data": { "video": {"id": str, "owner": {"displayName": str}, "title": str, "game": {"displayName": str}} } }, validate.get(("data", "video")), validate.union_get("id", ("owner", "displayName"), ("game", "displayName"), "title"), ), ) def metadata_channel(self, channel): queries = [ self._gql_persisted_query( "ChannelShell", "c3ea5a669ec074a58df5c11ce3c27093fa38534c94286dc14b68a25d5adcbf55", login=channel, lcpVideosEnabled=False, ), self._gql_persisted_query( "StreamMetadata", "059c4653b788f5bdb2f5a2d2a24b0ddc3831a15079001a3d927556a96fb0517f", channelLogin=channel, ), ] return self.call( queries, schema=validate.Schema( [ validate.all({"data": {"userOrError": {"displayName": str}}}), validate.all( { "data": { "user": {"lastBroadcast": {"title": str}, "stream": {"id": str, "game": {"name": str}}} } } ), ], validate.union_get( (1, "data", "user", "stream", "id"), (0, "data", "userOrError", "displayName"), (1, "data", "user", "stream", "game", "name"), (1, "data", "user", "lastBroadcast", "title"), ), ), ) def metadata_clips(self, clipname): queries = [ self._gql_persisted_query( "ClipsView", "4480c1dcc2494a17bb6ef64b94a5213a956afb8a45fe314c66b0d04079a93a8f", slug=clipname ), self._gql_persisted_query( "ClipsTitle", "f6cca7f2fdfbfc2cecea0c88452500dae569191e58a265f97711f8f2a838f5b4", slug=clipname ), ] return self.call( queries, schema=validate.Schema( [ validate.all( {"data": {"clip": {"id": str, "broadcaster": {"displayName": str}, "game": {"name": str}}}}, validate.get(("data", "clip")), ), validate.all({"data": {"clip": {"title": str}}}, validate.get(("data", "clip"))), ], validate.union_get((0, "id"), (0, "broadcaster", 
"displayName"), (0, "game", "name"), (1, "title")), ), ) def access_token(self, is_live, channel_or_vod): query = self._gql_persisted_query( "PlaybackAccessToken", "0828119ded1c13477966434e15800ff57ddacf13ba1911c129dc2200705b0712", isLive=is_live, login=channel_or_vod if is_live else "", isVod=not is_live, vodID=channel_or_vod if not is_live else "", playerType="embed", ) subschema = validate.any( None, validate.all({"value": str, "signature": str}, validate.union_get("signature", "value")) ) return self.call( query, schema=validate.Schema( { "data": validate.any( validate.all( {"streamPlaybackAccessToken": subschema}, validate.get("streamPlaybackAccessToken") ), validate.all( {"videoPlaybackAccessToken": subschema}, validate.get("videoPlaybackAccessToken") ), ) }, validate.get("data"), ), ) def clips(self, clipname): query = self._gql_persisted_query( "VideoAccessToken_Clip", "36b89d2507fce29e5ca551df756d27c1cfe079e2609642b4390aa4c35796eb11", slug=clipname ) return self.call( query, schema=validate.Schema( { "data": { "clip": { "playbackAccessToken": {"signature": str, "value": str}, "videoQualities": [ validate.all( { "frameRate": validate.transform(int), "quality": str, "sourceURL": validate.url(), }, validate.transform(lambda q: (f"{q["quality"]}p{q["frameRate"]}", q["sourceURL"])), ) ], } } }, validate.get(("data", "clip")), validate.union_get( ("playbackAccessToken", "signature"), ("playbackAccessToken", "value"), "videoQualities" ), ), ) def stream_metadata(self, channel): query = self._gql_persisted_query( "StreamMetadata", "1c719a40e481453e5c48d9bb585d971b8b372f8ebb105b17076722264dfa5b3e", channelLogin=channel ) return self.call( query, schema=validate.Schema( {"data": {"user": {"stream": {"type": str}}}}, validate.get(("data", "user", "stream")) ), ) def hosted_channel(self, channel): query = self._gql_persisted_query( "UseHosting", "427f55a3daca510f726c02695a898ef3a0de4355b39af328848876052ea6b337", channelLogin=channel ) return self.call( query, 
            schema=validate.Schema(
                {"data": {"user": {"hosting": {"login": str, "displayName": str}}}},
                validate.get(("data", "user", "hosting")),
                validate.union_get("login", "displayName"),
            ),
        )


# URL matcher: live channels, VODs (/videos/<id> and /<channel>/video/<id>)
# and clips; the "subdomain" group distinguishes player/clips share URLs.
@pluginmatcher(
    re.compile(
        r"""
            https?://(?:(?P<subdomain>[\w-]+)\.)?twitch\.tv/
            (?:
                videos/(?P<videos_id>\d+)
                |
                (?P<channel>[^/]+)
                (?:
                    /video/(?P<video_id>\d+)
                    |
                    /clip/(?P<clip_name>[\w-]+)
                )?
            )
        """,
        re.VERBOSE,
    )
)
class Twitch(Plugin):
    arguments = PluginArguments(
        PluginArgument(
            "chrome-oauth",
            action="store_true",
            help="""
            Extract OAuth token from Chrome.
            """,
        ),
        PluginArgument(
            "purple-adblock",
            action="store_true",
            help="""
            Use Purple Adblock to block ads.
            """,
        ),
        PluginArgument(
            "disable-hosting",
            action="store_true",
            help="""
            Do not open the stream if the target channel is hosting another channel.
            """,
        ),
        PluginArgument(
            "disable-ads",
            action="store_true",
            help="""
            Skip embedded advertisement segments at the beginning or during a stream.
            Will cause these segments to be missing from the stream.
            """,
        ),
        PluginArgument(
            "disable-reruns",
            action="store_true",
            help="""
            Do not open the stream if the target channel is currently broadcasting a rerun.
            """,
        ),
        PluginArgument(
            "low-latency",
            action="store_true",
            help=f"""
            Enables low latency streaming by prefetching HLS segments.
            Sets --hls-segment-stream-data to true and --hls-live-edge to {LOW_LATENCY_MAX_LIVE_EDGE}, if it is higher.
            Reducing --hls-live-edge to 1 will result in the lowest latency possible, but will most likely cause buffering.

            In order to achieve true low latency streaming during playback, the player's caching/buffering settings will
            need to be adjusted and reduced to a value as low as possible, but still high enough to not cause any buffering.
            This depends on the stream's bitrate and the quality of the connection to Twitch's servers.
            Please refer to the player's own documentation for the required configuration.
            Player parameters can be set via --player-args.
            Note: Low latency streams have to be enabled by the broadcasters on Twitch themselves.
            Regular streams can cause buffering issues with this option enabled due to the reduced --hls-live-edge value.
            """,
        ),
        PluginArgument(
            "api-header",
            metavar="KEY=VALUE",
            type=keyvalue,
            action="append",
            help="""
            A header to add to each Twitch API HTTP request.
            Can be repeated to add multiple headers.
            """,
        ),
    )

    def __init__(self, url):
        super().__init__(url)
        match = self.match.groupdict()
        parsed = urlparse(url)
        self.params = parse_qsd(parsed.query)
        self.subdomain = match.get("subdomain")
        self.video_id = None
        self.channel = None
        self.clip_name = None
        self._checked_metadata = False

        if self.subdomain == "player":
            # pop-out player
            if self.params.get("video"):
                self.video_id = self.params["video"]
            self.channel = self.params.get("channel")
        elif self.subdomain == "clips":
            # clip share URL
            self.clip_name = match.get("channel")
        else:
            self.channel = match.get("channel") and match.get("channel").lower()
            self.video_id = match.get("video_id") or match.get("videos_id")
            self.clip_name = match.get("clip_name")

        self.api = TwitchAPI(session=self.session)
        self.usher = UsherService(session=self.session)

        # Wrap the inherited get_id/get_author/get_category/get_title so
        # that metadata is fetched lazily, once, on first access.
        def method_factory(parent_method):
            def inner():
                if not self._checked_metadata:
                    self._checked_metadata = True
                    self._get_metadata()
                return parent_method()
            return inner

        parent = super()
        for metadata in "id", "author", "category", "title":
            method = f"get_{metadata}"
            setattr(self, method, method_factory(getattr(parent, method)))

    def _get_metadata(self):
        """Populate id/author/category/title, best-effort (errors ignored)."""
        try:
            if self.video_id:
                data = self.api.metadata_video(self.video_id)
            elif self.clip_name:
                data = self.api.metadata_clips(self.clip_name)
            elif self.channel:
                data = self.api.metadata_channel(self.channel)
            else:  # pragma: no cover
                return
            self.id, self.author, self.category, self.title = data
        except (PluginError, TypeError):
            pass

    def _access_token(self, is_live, channel_or_vod):
        # No token means there is nothing playable: abort stream resolution.
        try:
            sig, token = self.api.access_token(is_live, channel_or_vod)
        except (PluginError,
                TypeError):
            raise NoStreamsError(self.url)
        try:
            restricted_bitrates = self.api.parse_token(token)
        except PluginError:
            restricted_bitrates = []
        return sig, token, restricted_bitrates

    def _switch_to_hosted_channel(self):
        """
        Follow host redirects until a non-hosting channel is found.

        Returns True when stream resolution should be aborted (hosting
        disabled by option, or a hosting loop was detected), False to
        continue with the (possibly updated) self.channel.
        """
        disabled = self.options.get("disable_hosting")
        hosted_chain = [self.channel]
        while True:
            try:
                login, display_name = self.api.hosted_channel(self.channel)
            except PluginError:
                # channel is not hosting anyone: proceed normally
                return False
            log.info(f"{self.channel} is hosting {login}")
            if disabled:
                log.info("hosting was disabled by command line option")
                return True
            if login in hosted_chain:
                loop = " -> ".join(hosted_chain + [login])
                log.error(f"A loop of hosted channels has been detected, cannot find a playable stream. ({loop})")
                return True
            hosted_chain.append(login)
            log.info(f"switching to {login}")
            self.channel = login
            self.author = display_name

    def _check_for_rerun(self):
        """Return True when reruns are disabled and this stream is a rerun."""
        if not self.options.get("disable_reruns"):
            return False
        try:
            stream = self.api.stream_metadata(self.channel)
            if stream["type"] != "live":
                log.info("Reruns were disabled by command line option")
                return True
        except (PluginError, TypeError):
            pass
        return False

    def _get_hls_streams_live(self):
        if self._switch_to_hosted_channel():
            return
        if self._check_for_rerun():
            return
        # only get the token once the channel has been resolved
        log.debug(f"Getting live HLS streams for {self.channel}")
        self.session.http.headers.update({"referer": "https://player.twitch.tv", "origin": "https://player.twitch.tv"})
        sig, token, restricted_bitrates = self._access_token(True, self.channel)
        url = self.usher.channel(self.channel, sig=sig, token=token, fast_bread=True)
        return self._get_hls_streams(url, restricted_bitrates)

    def _get_hls_streams_video(self):
        log.debug(f"Getting HLS streams for video ID {self.video_id}")
        sig, token, restricted_bitrates = self._access_token(False, self.video_id)
        url = self.usher.video(self.video_id, nauthsig=sig, nauth=token)
        # If the stream is a VOD that is still being recorded, the stream should start at the beginning of the
        # recording
        return self._get_hls_streams(url, restricted_bitrates, force_restart=True)

    def _get_hls_streams(self, url, restricted_bitrates, **extra_params):
        """Parse the variant playlist at *url* into a name->stream mapping."""
        # optional "t" query parameter: start offset (hh:mm:ss) into a VOD
        time_offset = self.params.get("t", 0)
        if time_offset:
            try:
                time_offset = hours_minutes_seconds(time_offset)
            except ValueError:
                time_offset = 0

        try:
            streams = TwitchHLSStream.parse_variant_playlist(
                self.session, url, start_offset=time_offset, **extra_params
            )
        except OSError as err:
            err = str(err)
            # a 404 / unparsable playlist simply means "no streams"
            if "404 Client Error" in err or "Failed to parse playlist" in err:
                return
            else:
                raise PluginError(err)

        # warn about qualities that exist but are subscriber-only
        for name in restricted_bitrates:
            if name not in streams:
                log.warning(f"The quality '{name}' is not available since it requires a subscription.")

        return streams

    def _get_clips(self):
        try:
            sig, token, streams = self.api.clips(self.clip_name)
        except (PluginError, TypeError):
            return
        # clips are plain HTTP(S) downloads with the token in the query string
        for quality, stream in streams:
            yield quality, HTTPStream(self.session, update_qsd(stream, {"sig": sig, "token": token}))

    def _get_streams(self):
        # dispatch on what the URL matched: VOD, clip, or live channel
        if self.video_id:
            return self._get_hls_streams_video()
        elif self.clip_name:
            return self._get_clips()
        elif self.channel:
            return self._get_hls_streams_live()


__plugin__ = Twitch
import json import logging import re from datetime import datetime from random import random from typing import List, NamedTuple, Optional from urllib.parse import urlparse import requests from streamlink.exceptions import NoStreamsError, PluginError from streamlink.plugin import Plugin, PluginArgument, PluginArguments, pluginmatcher from streamlink.plugin.api import validate from streamlink.stream.hls import HLSStream, HLSStreamReader, HLSStreamWorker, HLSStreamWriter from streamlink.stream.hls_playlist import ByteRange, ExtInf, Key, M3U8, M3U8Parser, Map, load as load_hls_playlist from streamlink.stream.http import HTTPStream from streamlink.utils.args import keyvalue from streamlink.utils.parse import parse_json, parse_qsd from streamlink.utils.times import hours_minutes_seconds from streamlink.utils.url import update_qsd try: import browser_cookie3 as browser_cookie except ImportError: browser_cookie = None log = logging.getLogger(__name__) LOW_LATENCY_MAX_LIVE_EDGE = 2 class TwitchSegment(NamedTuple): uri: str duration: float title: Optional[str] key: Optional[Key] discontinuity: bool byterange: Optional[ByteRange] date: Optional[datetime] map: Optional[Map] ad: bool prefetch: bool # generic namedtuples are unsupported, so just subclass class TwitchSequence(NamedTuple): num: int segment: TwitchSegment class TwitchM3U8(M3U8): segments: List[TwitchSegment] def __init__(self): super().__init__() self.dateranges_ads = [] class TwitchM3U8Parser(M3U8Parser): m3u8: TwitchM3U8 def parse_tag_ext_x_twitch_prefetch(self, value): segments = self.m3u8.segments if not segments: # pragma: no cover return last = segments[-1] # Use the average duration of all regular segments for the duration of prefetch segments. # This is better than using the duration of the last segment when regular segment durations vary a lot. # In low latency mode, the playlist reload time is the duration of the last segment. 
duration = ( last.duration if last.prefetch else sum(segment.duration for segment in segments) / float(len(segments)) ) segments.append(last._replace(uri=self.uri(value), duration=duration, prefetch=True)) def parse_tag_ext_x_daterange(self, value): super().parse_tag_ext_x_daterange(value) daterange = self.m3u8.dateranges[-1] is_ad = ( daterange.classname == "twitch-stitched-ad" or str(daterange.id or "").startswith("stitched-ad-") or any(attr_key.startswith("X-TV-TWITCH-AD-") for attr_key in daterange.x.keys()) ) if is_ad: self.m3u8.dateranges_ads.append(daterange) def get_segment(self, uri: str) -> TwitchSegment: extinf: ExtInf = self.state.pop("extinf", None) or ExtInf(0, None) date = self.state.pop("date", None) ad = any(self.m3u8.is_date_in_daterange(date, daterange) for daterange in self.m3u8.dateranges_ads) return TwitchSegment( uri=uri, duration=extinf.duration, title=extinf.title, key=self.state.get("key"), discontinuity=self.state.pop("discontinuity", False), byterange=self.state.pop("byterange", None), date=date, map=self.state.get("map"), ad=ad, prefetch=False, ) class TwitchHLSStreamWorker(HLSStreamWorker): def __init__(self, reader, *args, **kwargs): self.had_content = False super().__init__(reader, *args, **kwargs) def _reload_playlist(self, *args): return load_hls_playlist(*args, parser=TwitchM3U8Parser, m3u8=TwitchM3U8) def _playlist_reload_time(self, playlist: TwitchM3U8, sequences: List[TwitchSequence]): if self.stream.low_latency and sequences: return sequences[-1].segment.duration return super()._playlist_reload_time(playlist, sequences) def process_sequences(self, playlist: TwitchM3U8, sequences: List[TwitchSequence]): # ignore prefetch segments if not LL streaming if not self.stream.low_latency: sequences = [seq for seq in sequences if not seq.segment.prefetch] # check for sequences with real content if not self.had_content: self.had_content = next((True for seq in sequences if not seq.segment.ad), False) # When filtering ads, to check 
whether it's a LL stream, we need to wait for the real content to show up, # since playlists with only ad segments don't contain prefetch segments if ( self.stream.low_latency and self.had_content and not next((True for seq in sequences if seq.segment.prefetch), False) ): log.info("This is not a low latency stream") # show pre-roll ads message only on the first playlist containing ads if self.stream.disable_ads and self.playlist_sequence == -1 and not self.had_content: log.info("Waiting for pre-roll ads to finish, be patient") return super().process_sequences(playlist, sequences) class TwitchHLSStreamWriter(HLSStreamWriter): def should_filter_sequence(self, sequence: TwitchSequence): return self.stream.disable_ads and sequence.segment.ad class TwitchHLSStreamReader(HLSStreamReader): __worker__ = TwitchHLSStreamWorker __writer__ = TwitchHLSStreamWriter def __init__(self, stream): if stream.disable_ads: log.info("Will skip ad segments") if stream.low_latency: live_edge = max(1, min(LOW_LATENCY_MAX_LIVE_EDGE, stream.session.options.get("hls-live-edge"))) stream.session.options.set("hls-live-edge", live_edge) stream.session.options.set("hls-segment-stream-data", True) log.info(f"Low latency streaming (HLS live edge: {live_edge})") super().__init__(stream) class TwitchHLSStream(HLSStream): __reader__ = TwitchHLSStreamReader def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.disable_ads = self.session.get_plugin_option("twitch", "disable-ads") self.low_latency = self.session.get_plugin_option("twitch", "low-latency") class UsherService: def __init__(self, session): self.session = session self.purple_adblock = self.session.get_plugin_option("twitch", "purple-adblock") def _create_url(self, endpoint, **extra_params): if self.purple_adblock: url = f"https://jupter.ga{endpoint}" else: url = f"https://usher.ttvnw.net{endpoint}" params = { "player": "twitchweb", "p": int(random() * 999999), "type": "any", "allow_source": "true", "allow_audio_only": 
"true", "allow_spectre": "false", } params.update(extra_params) req = requests.Request("GET", url, params=params) req = self.session.http.prepare_request(req) log.info(f"Params on usher m3u8 request: {params}") return req.url def channel(self, channel, **extra_params): try: extra_params_debug = validate.Schema( validate.get("token"), validate.parse_json(), {"adblock": bool, "geoblock_reason": str, "hide_ads": bool, "server_ads": bool, "show_ads": bool}, ).validate(extra_params) log.debug(f"{extra_params_debug!r}") except PluginError: pass if self.purple_adblock: return self._create_url(f"/channel/{channel}", **extra_params) return self._create_url(f"/api/channel/hls/{channel}.m3u8", **extra_params) def video(self, video_id, **extra_params): return self._create_url(f"/vod/{video_id}", **extra_params) class TwitchAPI: def __init__(self, session): self.session = session self.headers = { "Client-ID": "kimne78kx3ncx6brgo4mv6wki5h1ko", } if session.get_plugin_option("twitch", "chrome-oauth") and browser_cookie: oauth_token = next( (co.value for co in browser_cookie.chrome(domain_name=".twitch.tv") if co.name == "auth-token"), None ) if oauth_token: self.headers.update({"Authorization": f"OAuth {oauth_token}"}) self.headers.update(**{k: v for k, v in session.get_plugin_option("twitch", "api-header") or []}) def call(self, data, schema=None): res = self.session.http.post("https://gql.twitch.tv/gql", data=json.dumps(data), headers=self.headers) return self.session.http.json(res, schema=schema) @staticmethod def _gql_persisted_query(operationname, sha256hash, **variables): return { "operationName": operationname, "extensions": {"persistedQuery": {"version": 1, "sha256Hash": sha256hash}}, "variables": dict(**variables), } @staticmethod def parse_token(tokenstr): return parse_json( tokenstr, schema=validate.Schema( { "chansub": { "restricted_bitrates": validate.all( [str], validate.filter(lambda n: not re.match(r"(.+_)?archives|live|chunked", n)) ) } }, 
validate.get(("chansub", "restricted_bitrates")), ), ) # GraphQL API calls def metadata_video(self, video_id): query = self._gql_persisted_query( "VideoMetadata", "cb3b1eb2f2d2b2f65b8389ba446ec521d76c3aa44f5424a1b1d235fe21eb4806", channelLogin="", # parameter can be empty videoID=video_id, ) return self.call( query, schema=validate.Schema( { "data": { "video": {"id": str, "owner": {"displayName": str}, "title": str, "game": {"displayName": str}} } }, validate.get(("data", "video")), validate.union_get("id", ("owner", "displayName"), ("game", "displayName"), "title"), ), ) def metadata_channel(self, channel): queries = [ self._gql_persisted_query( "ChannelShell", "c3ea5a669ec074a58df5c11ce3c27093fa38534c94286dc14b68a25d5adcbf55", login=channel, lcpVideosEnabled=False, ), self._gql_persisted_query( "StreamMetadata", "059c4653b788f5bdb2f5a2d2a24b0ddc3831a15079001a3d927556a96fb0517f", channelLogin=channel, ), ] return self.call( queries, schema=validate.Schema( [ validate.all({"data": {"userOrError": {"displayName": str}}}), validate.all( { "data": { "user": {"lastBroadcast": {"title": str}, "stream": {"id": str, "game": {"name": str}}} } } ), ], validate.union_get( (1, "data", "user", "stream", "id"), (0, "data", "userOrError", "displayName"), (1, "data", "user", "stream", "game", "name"), (1, "data", "user", "lastBroadcast", "title"), ), ), ) def metadata_clips(self, clipname): queries = [ self._gql_persisted_query( "ClipsView", "4480c1dcc2494a17bb6ef64b94a5213a956afb8a45fe314c66b0d04079a93a8f", slug=clipname ), self._gql_persisted_query( "ClipsTitle", "f6cca7f2fdfbfc2cecea0c88452500dae569191e58a265f97711f8f2a838f5b4", slug=clipname ), ] return self.call( queries, schema=validate.Schema( [ validate.all( {"data": {"clip": {"id": str, "broadcaster": {"displayName": str}, "game": {"name": str}}}}, validate.get(("data", "clip")), ), validate.all({"data": {"clip": {"title": str}}}, validate.get(("data", "clip"))), ], validate.union_get((0, "id"), (0, "broadcaster", 
"displayName"), (0, "game", "name"), (1, "title")), ), ) def access_token(self, is_live, channel_or_vod): query = self._gql_persisted_query( "PlaybackAccessToken", "0828119ded1c13477966434e15800ff57ddacf13ba1911c129dc2200705b0712", isLive=is_live, login=channel_or_vod if is_live else "", isVod=not is_live, vodID=channel_or_vod if not is_live else "", playerType="embed", ) subschema = validate.any( None, validate.all({"value": str, "signature": str}, validate.union_get("signature", "value")) ) return self.call( query, schema=validate.Schema( { "data": validate.any( validate.all( {"streamPlaybackAccessToken": subschema}, validate.get("streamPlaybackAccessToken") ), validate.all( {"videoPlaybackAccessToken": subschema}, validate.get("videoPlaybackAccessToken") ), ) }, validate.get("data"), ), ) def clips(self, clipname): query = self._gql_persisted_query( "VideoAccessToken_Clip", "36b89d2507fce29e5ca551df756d27c1cfe079e2609642b4390aa4c35796eb11", slug=clipname ) return self.call( query, schema=validate.Schema( { "data": { "clip": { "playbackAccessToken": {"signature": str, "value": str}, "videoQualities": [ validate.all( { "frameRate": validate.transform(int), "quality": str, "sourceURL": validate.url(), }, validate.transform(lambda q: (f"{q['quality']}p{q['frameRate']}", q["sourceURL"])), ) ], } } }, validate.get(("data", "clip")), validate.union_get( ("playbackAccessToken", "signature"), ("playbackAccessToken", "value"), "videoQualities" ), ), ) def stream_metadata(self, channel): query = self._gql_persisted_query( "StreamMetadata", "1c719a40e481453e5c48d9bb585d971b8b372f8ebb105b17076722264dfa5b3e", channelLogin=channel ) return self.call( query, schema=validate.Schema( {"data": {"user": {"stream": {"type": str}}}}, validate.get(("data", "user", "stream")) ), ) def hosted_channel(self, channel): query = self._gql_persisted_query( "UseHosting", "427f55a3daca510f726c02695a898ef3a0de4355b39af328848876052ea6b337", channelLogin=channel ) return self.call( query, 
schema=validate.Schema( {"data": {"user": {"hosting": {"login": str, "displayName": str}}}}, validate.get(("data", "user", "hosting")), validate.union_get("login", "displayName"), ), ) @pluginmatcher( re.compile( r""" https?://(?:(?P<subdomain>[\w-]+)\.)?twitch\.tv/ (?: videos/(?P<videos_id>\d+) | (?P<channel>[^/]+) (?: /video/(?P<video_id>\d+) | /clip/(?P<clip_name>[\w-]+) )? ) """, re.VERBOSE, ) ) class Twitch(Plugin): arguments = PluginArguments( PluginArgument( "chrome-oauth", action="store_true", help=""" Extract OAuth token from Chrome. """, ), PluginArgument( "purple-adblock", action="store_true", help=""" Use Purple Adblock to block ads. """, ), PluginArgument( "disable-hosting", action="store_true", help=""" Do not open the stream if the target channel is hosting another channel. """, ), PluginArgument( "disable-ads", action="store_true", help=""" Skip embedded advertisement segments at the beginning or during a stream. Will cause these segments to be missing from the stream. """, ), PluginArgument( "disable-reruns", action="store_true", help=""" Do not open the stream if the target channel is currently broadcasting a rerun. """, ), PluginArgument( "low-latency", action="store_true", help=f""" Enables low latency streaming by prefetching HLS segments. Sets --hls-segment-stream-data to true and --hls-live-edge to {LOW_LATENCY_MAX_LIVE_EDGE}, if it is higher. Reducing --hls-live-edge to 1 will result in the lowest latency possible, but will most likely cause buffering. In order to achieve true low latency streaming during playback, the player's caching/buffering settings will need to be adjusted and reduced to a value as low as possible, but still high enough to not cause any buffering. This depends on the stream's bitrate and the quality of the connection to Twitch's servers. Please refer to the player's own documentation for the required configuration. Player parameters can be set via --player-args. 
Note: Low latency streams have to be enabled by the broadcasters on Twitch themselves. Regular streams can cause buffering issues with this option enabled due to the reduced --hls-live-edge value. """, ), PluginArgument( "api-header", metavar="KEY=VALUE", type=keyvalue, action="append", help=""" A header to add to each Twitch API HTTP request. Can be repeated to add multiple headers. """, ), ) def __init__(self, url): super().__init__(url) match = self.match.groupdict() parsed = urlparse(url) self.params = parse_qsd(parsed.query) self.subdomain = match.get("subdomain") self.video_id = None self.channel = None self.clip_name = None self._checked_metadata = False if self.subdomain == "player": # pop-out player if self.params.get("video"): self.video_id = self.params["video"] self.channel = self.params.get("channel") elif self.subdomain == "clips": # clip share URL self.clip_name = match.get("channel") else: self.channel = match.get("channel") and match.get("channel").lower() self.video_id = match.get("video_id") or match.get("videos_id") self.clip_name = match.get("clip_name") self.api = TwitchAPI(session=self.session) self.usher = UsherService(session=self.session) def method_factory(parent_method): def inner(): if not self._checked_metadata: self._checked_metadata = True self._get_metadata() return parent_method() return inner parent = super() for metadata in "id", "author", "category", "title": method = f"get_{metadata}" setattr(self, method, method_factory(getattr(parent, method))) def _get_metadata(self): try: if self.video_id: data = self.api.metadata_video(self.video_id) elif self.clip_name: data = self.api.metadata_clips(self.clip_name) elif self.channel: data = self.api.metadata_channel(self.channel) else: # pragma: no cover return self.id, self.author, self.category, self.title = data except (PluginError, TypeError): pass def _access_token(self, is_live, channel_or_vod): try: sig, token = self.api.access_token(is_live, channel_or_vod) except (PluginError, 
TypeError): raise NoStreamsError(self.url) try: restricted_bitrates = self.api.parse_token(token) except PluginError: restricted_bitrates = [] return sig, token, restricted_bitrates def _switch_to_hosted_channel(self): disabled = self.options.get("disable_hosting") hosted_chain = [self.channel] while True: try: login, display_name = self.api.hosted_channel(self.channel) except PluginError: return False log.info(f"{self.channel} is hosting {login}") if disabled: log.info("hosting was disabled by command line option") return True if login in hosted_chain: loop = " -> ".join(hosted_chain + [login]) log.error(f"A loop of hosted channels has been detected, cannot find a playable stream. ({loop})") return True hosted_chain.append(login) log.info(f"switching to {login}") self.channel = login self.author = display_name def _check_for_rerun(self): if not self.options.get("disable_reruns"): return False try: stream = self.api.stream_metadata(self.channel) if stream["type"] != "live": log.info("Reruns were disabled by command line option") return True except (PluginError, TypeError): pass return False def _get_hls_streams_live(self): if self._switch_to_hosted_channel(): return if self._check_for_rerun(): return # only get the token once the channel has been resolved log.debug(f"Getting live HLS streams for {self.channel}") self.session.http.headers.update({"referer": "https://player.twitch.tv", "origin": "https://player.twitch.tv"}) sig, token, restricted_bitrates = self._access_token(True, self.channel) url = self.usher.channel(self.channel, sig=sig, token=token, fast_bread=True) return self._get_hls_streams(url, restricted_bitrates) def _get_hls_streams_video(self): log.debug(f"Getting HLS streams for video ID {self.video_id}") sig, token, restricted_bitrates = self._access_token(False, self.video_id) url = self.usher.video(self.video_id, nauthsig=sig, nauth=token) # If the stream is a VOD that is still being recorded, the stream should start at the beginning of the 
recording return self._get_hls_streams(url, restricted_bitrates, force_restart=True) def _get_hls_streams(self, url, restricted_bitrates, **extra_params): time_offset = self.params.get("t", 0) if time_offset: try: time_offset = hours_minutes_seconds(time_offset) except ValueError: time_offset = 0 try: streams = TwitchHLSStream.parse_variant_playlist( self.session, url, start_offset=time_offset, **extra_params ) except OSError as err: err = str(err) if "404 Client Error" in err or "Failed to parse playlist" in err: return else: raise PluginError(err) for name in restricted_bitrates: if name not in streams: log.warning(f"The quality '{name}' is not available since it requires a subscription.") return streams def _get_clips(self): try: sig, token, streams = self.api.clips(self.clip_name) except (PluginError, TypeError): return for quality, stream in streams: yield quality, HTTPStream(self.session, update_qsd(stream, {"sig": sig, "token": token})) def _get_streams(self): if self.video_id: return self._get_hls_streams_video() elif self.clip_name: return self._get_clips() elif self.channel: return self._get_hls_streams_live() __plugin__ = Twitch
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Retrievers for RAG. """ from abc import ABC, abstractmethod import copy import csv import gzip import numpy as np import os from parlai.core.message import Message import torch import torch.cuda import torch.nn import transformers from tqdm import tqdm try: from transformers import BertTokenizerFast as BertTokenizer except ImportError: from transformers import BertTokenizer from typing import Tuple, List, Dict, Union, Optional, Any from typing_extensions import final from sklearn.feature_extraction.text import TfidfVectorizer from parlai.agents.tfidf_retriever.tfidf_retriever import TfidfRetrieverAgent from parlai.core.agents import create_agent, create_agent_from_model_file from parlai.core.build_data import modelzoo_path from parlai.core.dict import DictionaryAgent from parlai.core.loader import register_agent from parlai.core.opt import Opt from parlai.core.torch_generator_agent import TorchGeneratorAgent from parlai.core.torch_ranker_agent import TorchRankerAgent from parlai.tasks.wizard_of_internet.mutators import chunk_docs_in_message import parlai.tasks.wizard_of_internet.constants as CONST import parlai.utils.logging as logging from parlai.utils.torch import padded_tensor from parlai.utils.typing import TShared from parlai.utils.io import PathManager from parlai.agents.rag.dpr import DprQueryEncoder from parlai.agents.rag.polyfaiss import RagDropoutPolyWrapper from parlai.agents.rag.indexers import DenseHNSWFlatIndexer, indexer_factory from parlai.agents.rag.args import ( RetrieverType, WOW_INDEX_PATH, WOW_PASSAGES_PATH, POLYENCODER_OPT_KEYS, TRANSFORMER_RANKER_BASE_OPT, WOW_COMPRESSED_INDEX_PATH, ) from parlai.agents.rag.retrieve_api import SearchEngineRetriever def load_passage_reader( ctx_file: str, return_dict: bool = True ) -> Union[Dict[str, Tuple[str, 
str]], List[Tuple[str, str, str]]]: """ Load passages from file, corresponding to a FAISS index. We attempt to read the passages with a csv reader. If passage files are not saved correctly with a csv reader, reads can fail. :param ctxt_file: file to read :return reader: return a reader over the passages """ logging.info(f'Reading data from: {ctx_file}') f_open = gzip.open if ctx_file.endswith(".gz") else open try: passages = {} if return_dict else [] with f_open(ctx_file) as tsvfile: _reader = csv.reader(tsvfile, delimiter='\t') # type: ignore ids = [] for idx, row in tqdm(enumerate(_reader)): if idx == 0: assert row[0] == 'id' ids.append(-1) elif idx <= 1: ids.append(row[0]) if return_dict: passages[row[0]] = (row[1], row[2]) # type: ignore else: passages.append((row[0], row[1], row[2])) # type: ignore continue else: assert int(row[0]) == int(ids[idx - 1]) + 1, "invalid load" if return_dict: passages[row[0]] = (row[1], row[2]) # type: ignore else: passages.append((row[0], row[1], row[2])) # type: ignore ids.append(row[0]) del ids except (csv.Error, AssertionError) as e: passages = {} if return_dict else [] logging.error(f'Exception: {e}') logging.warning('Error in loading csv; loading via readlines') with f_open(ctx_file) as tsvfile: for idx, l in tqdm(enumerate(tsvfile.readlines())): line = l.replace('\n', '').split('\t') # type: ignore assert len(line) == 3 if idx == 0: assert line[0] == 'id' if line[0] != 'id': if return_dict: passages[line[0]] = (line[1], line[2]) # type: ignore else: passages.append((line[0], line[1], line[2])) # type: ignore return passages def load_passages_dict(ctx_file: str) -> Dict[str, Tuple[str, str]]: """ Load passages as a dict. 
:param ctx_file: file to read :return passages_dict: return a dict mapping passage id to a tuple of (text, title) """ psgs_dict = load_passage_reader(ctx_file, return_dict=True) assert isinstance(psgs_dict, dict) return psgs_dict def load_passages_list(ctx_file: str) -> List[Tuple[str, str, str]]: """ Load passages as a list. :param ctx_file: file to read :return passages_dict: return a list of 3-tuples (id, text, title) """ psgs_list = load_passage_reader(ctx_file, return_dict=False) assert isinstance(psgs_list, list) return psgs_list class Document: """ A Document used in retrieval. """ TITLE_DELIM = ' / ' PASSAGE_DELIM = ' // ' def __init__(self, title: str, text: str, docid: Union[int, str]): assert all(isinstance(t, str) for t in [title, text]) self._title = title self._text = text self._id = str(docid) def get_title(self) -> str: return self._title def get_text(self) -> str: return self._text def get_id(self) -> str: return self._id def __repr__(self): return f"ID: {self._id}\nTitle: {self._title}\nText: {self._text}" def __str__(self): return f"{self._title} | {self._text}" def get_passage_str(self): return f"{self._title.strip()}{self.TITLE_DELIM}{self._text.strip()}{self.PASSAGE_DELIM}" def get_tokenization_str(self): return f"{self._title.strip()}{self.TITLE_DELIM}{self._text.strip()}" BLANK_DOC = Document('', '', '') def argsort_scores_and_docs( scores: torch.Tensor, docs: List[Document], n_docs: int ) -> Tuple[List[Document], torch.Tensor]: """ Sort scores and documents by score, return n_docs ranked docs/scores. :param scores: scores with which to rank :param docs: docs to argsort :param n_docs: number of docs to return :return: (docs, scores) --> sorted documents, according to scores. 
""" scores_sorter = scores.sort(descending=True) ranked_docs = [docs[idx] for idx in scores_sorter.indices[:n_docs]] ranked_scores = scores_sorter.values[:n_docs] return ranked_docs, ranked_scores def clean_vec( vec: torch.LongTensor, end_idx: int, special_toks: List[int] = None ) -> List[int]: """ Remove special tokens from a tensor prior to text conversion. """ new_vec = [] for i in vec: if i == end_idx: break elif special_toks and i in special_toks: continue new_vec.append(i) return new_vec class RagRetrieverTokenizer: """ Wrapper for various tokenizers used by RAG Query Model. """ VOCAB_PATH = 'vocab.txt' def __init__( self, datapath: str, query_model: str, dictionary: DictionaryAgent, max_length: int = 256, delimiter='\n', ): """ :param query_model: query model type (e.g. bert) :param dictionary: ParlAI dictionary agent :param fast: whether to instantiate fast BertTokenizer :param max_length: maximum length of encoding. """ self.datapath = datapath self.query_model = query_model self.tokenizer = self._init_tokenizer(dictionary) self.max_length = max_length self._delimiter = delimiter def _init_tokenizer( self, dictionary: DictionaryAgent ) -> Union[BertTokenizer, DictionaryAgent]: """ If a regular parlai model, use the regular dictionary. Otherwise, build as necessary :param dictionary: ParlAI dictionary agent """ if self.query_model in ['bert', 'bert_from_parlai_rag']: try: return BertTokenizer.from_pretrained('bert-base-uncased') except (ImportError, OSError): vocab_path = PathManager.get_local_path( os.path.join(self.datapath, "bert_base_uncased", self.VOCAB_PATH) ) return transformers.BertTokenizer.from_pretrained(vocab_path) else: return dictionary def get_pad_idx(self) -> int: """ Return pad token idx. """ if self.query_model in ['bert', 'bert_from_parlai_rag']: return self.tokenizer.pad_token_id else: return self.tokenizer[self.tokenizer.null_token] def get_delimiter(self) -> str: """ Return delimiter. 
""" return self._delimiter def get_bos_idx(self) -> int: """ Return start token idx. """ if self.query_model in ['bert', 'bert_from_parlai_rag']: return self.tokenizer.bos_token_id or 1 else: return self.tokenizer[self.tokenizer.start_token] def get_eos_idx(self) -> int: """ Return start token idx. """ if self.query_model in ['bert', 'bert_from_parlai_rag']: return self.tokenizer.eos_token_id or 2 else: return self.tokenizer[self.tokenizer.end_token] def encode(self, txt: str, txt_pair: Optional[str] = None) -> List[int]: """ Encode text. :param txt: text to encode :param txt_pair: Optional additional text to encode. Useful if encoding two parts of a text, e.g. title & text. :return encoding: return encoded text. """ if self.query_model in ['bert', 'bert_from_parlai_rag']: txt = txt.lower().strip() if txt_pair: txt_pair = txt_pair.lower().strip() return self.tokenizer.encode( txt, text_pair=txt_pair, add_special_tokens=True, max_length=self.max_length, pad_to_max_length=False, truncation='longest_first', ) else: return self.tokenizer.txt2vec(txt) def decode(self, vec: torch.LongTensor) -> str: """ Decode a token vector into a string. """ if self.query_model in ['bert', 'bert_from_parlai_rag']: return self.tokenizer.decode( clean_vec(vec, self.get_eos_idx()), skip_special_tokens=True ) else: return self.tokenizer.vec2txt( clean_vec( vec, self.get_eos_idx(), special_toks=[ self.get_pad_idx(), self.get_bos_idx(), self.get_eos_idx(), ], ) ) class RagRetriever(torch.nn.Module, ABC): """ RAG Retriever. Provides an interface to the RagModel for retrieving documents. 
""" def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared = None): super().__init__() self.retriever_type = RetrieverType(opt['rag_retriever_type']) if not ( ( self.retriever_type in ( RetrieverType.SEARCH_ENGINE, RetrieverType.OBSERVATION_ECHO_RETRIEVER, ) ) or (opt.get('retriever_debug_index') in [None, 'none']) ): if opt.get('retriever_debug_index') == 'exact': opt['path_to_index'] = WOW_INDEX_PATH else: opt['path_to_index'] = WOW_COMPRESSED_INDEX_PATH opt['path_to_dpr_passages'] = WOW_PASSAGES_PATH self.opt = opt self.print_docs = opt.get('print_docs', False) self.max_doc_len = opt['max_doc_token_length'] self.max_query_len = opt['rag_query_truncate'] or 1024 self.end_idx = dictionary[dictionary.end_token] self._tokenizer = RagRetrieverTokenizer( datapath=opt['datapath'], query_model=opt['query_model'], dictionary=dictionary, delimiter=opt.get('delimiter', '\n') or '\n', ) self.fp16 = ( not opt['no_cuda'] and torch.cuda.is_available() and self.opt.get('fp16', False) ) @final def retrieve( self, query: torch.LongTensor ) -> Tuple[List[List[Document]], torch.Tensor]: """ Retrieve documents, given a query vector. :param query: tokenized query :return (docs, scores): docs: list of Documents for each batch example. scores: [bsz, n_docs] document scores """ docs, scores = self.retrieve_and_score(query) if self.print_docs: self.display_docs(docs) self.top_docs = [[str(d) for d in ds] for ds in docs] return docs, scores @abstractmethod def retrieve_and_score( self, query: torch.LongTensor ) -> Tuple[List[List[Document]], torch.Tensor]: """ Retrieve documents for a given query. :param query: tokenized query :return (docs, scores): docs: list of Documents for each batch example. scores: [bsz, n_docs] document scores """ def tokenize_query(self, query: str) -> List[int]: """ Tokenize the query. 
:param query: query to tokenize :return tokenized_query: return list of tokens """ return self._tokenizer.encode(query) def vectorize_texts( self, input_text: List[str], tokenizer: RagRetrieverTokenizer, max_len: Optional[int] = None, ) -> torch.LongTensor: """ Vectorize a set of input texts with an arbitrary RagRetrieverTokenizer. :param input_text: list of input strings :param tokenizer: tokenizer that encodes the input strings :param max_len: max length to tokenize :return vecs: returns a stacked padded tensor of tokens. """ vecs = [tokenizer.encode(q) for q in input_text] if max_len: vecs = [v[:max_len] for v in vecs] vecs, _ = padded_tensor( vecs, fp16friendly=self.fp16, pad_idx=tokenizer.get_pad_idx(), max_len=max_len, ) return vecs def get_delimiter(self) -> str: """ Return the tokenizer's delimiter. """ return self._tokenizer.get_delimiter() def display_docs(self, top_docs: List[List[Document]]): """ Prints documents. :param top_docs: list of documents for each batch item """ for docs in top_docs: for rank, doc in enumerate(docs): print(f"Rank: {rank}\n{doc}") def share(self) -> TShared: """ Share retriever stuff. Share anything that can be handily used by other retrievers. This is primarily to share things that take up substantial RAM (indices, passages) """ return {} class RagRetrieverReranker(RagRetriever, ABC): """ Trait that carries methods for Reranker-based retrievers. """ def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared = None): super().__init__(opt, dictionary, shared=shared) self.n_final_docs = opt['n_docs'] @final def retrieve_and_score( self, query: torch.LongTensor ) -> Tuple[List[List[Document]], torch.Tensor]: """ Perform two-stage retrieval; rescore initial set of docs. :param query: query tokens :return (docs, scores): docs: list of Documents for each batch example scores: doc scores """ # 1. 
Get Initial documents initial_docs, initial_scores = self._retrieve_initial(query) new_scores = self._rescore(query, initial_docs) # 2. Get new scores final_docs: List[List[Document]] = [] final_scores: List[torch.Tensor] = [] new_score_lambda = self._get_new_score_lambda() for i in range(len(initial_docs)): docs_i = initial_docs[i] initial_scores_i = initial_scores[i] scores_i = torch.mul(initial_scores_i, (1 - new_score_lambda)) + torch.mul( new_scores[i], new_score_lambda ) docs_i, scores_i = argsort_scores_and_docs( scores_i, docs_i, self.n_final_docs ) final_docs.append(docs_i) final_scores.append(scores_i) return final_docs, torch.stack(final_scores) @abstractmethod def _retrieve_initial( self, query: torch.LongTensor ) -> Tuple[List[List[Document]], torch.Tensor]: """ Perform initial stage of retrieval. :param query: tokenized query :return (docs, scores): docs: list of Documents for each batch example scores: doc scores """ @abstractmethod def _rescore( self, query: torch.LongTensor, docs: List[List[Document]] ) -> torch.Tensor: """ Rescore retrieved documents. :param query: tokenized query :param docs: List of initially retrieved top docs for each batch example :return scores: return new doc scores. """ @abstractmethod def _get_new_score_lambda(self) -> torch.nn.Parameter: """ Return the lambda used for computing the new score. """ class DPRRetriever(RagRetriever): """ DPR Retriever. """ def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared=None): """ Initialize DPR Retriever. 
""" super().__init__(opt, dictionary, shared=shared) self.load_index(opt, shared) self.n_docs = opt['n_docs'] self.query_encoder = DprQueryEncoder( opt, dpr_model=opt['query_model'], pretrained_path=opt['dpr_model_file'] ) def load_index(self, opt, shared): if not shared: self.indexer = indexer_factory(opt) index_path = modelzoo_path(opt['datapath'], opt['path_to_index']) passages_path = modelzoo_path(opt['datapath'], opt['path_to_dpr_passages']) embeddings_path = None if opt['path_to_dense_embeddings'] is not None: embeddings_path = modelzoo_path( opt['datapath'], opt['path_to_dense_embeddings'] ) self.indexer.deserialize_from(index_path, embeddings_path) self.passages = load_passages_dict(passages_path) elif shared: self.indexer = shared['indexer'] self.passages = shared['passages'] def share(self) -> TShared: """ Share FAISS retriever and passages. """ shared = super().share() shared['indexer'] = self.indexer shared['passages'] = self.passages return shared def index_retrieve( self, query: torch.Tensor, n_docs: int ) -> Tuple[torch.Tensor, torch.Tensor]: """ Retrieve over FAISS index. :param query: bsz x embed_dim query tensor :param n_docs: number of docs to retrieve :return (ids, scores): ids: [bsz, n_docs] tensor of document IDs scores: [bsz, n_docs] tensor of document scores """ # retrieve docs and scores, reconstruct document embeddings & scores # NOTE: important that detach occurs _for retrieval only_, as we use the # query encodings to compute scores later in this function; if detached, # gradient will not flow to the query encoder. 
top_docs_and_scores = self.indexer.search( query.cpu().detach().to(torch.float32).numpy(), n_docs ) ids, np_vectors = zip(*top_docs_and_scores) vectors = torch.tensor(np.array(np_vectors)).to(query) if isinstance(self.indexer, DenseHNSWFlatIndexer): vectors = vectors[:, :, :-1] # recompute exact FAISS scores scores = torch.bmm(query.unsqueeze(1), vectors.transpose(1, 2)).squeeze(1) if torch.isnan(scores).sum().item(): raise RuntimeError( '\n[ Document scores are NaN; please look into the built index. ]\n' '[ This generally happens if FAISS cannot separate vectors appropriately. ]\n' '[ If using a compressed index, try building an exact index: ]\n' '[ $ python index_dense_embeddings --indexer-type exact... ]' ) ids = torch.tensor([[int(s) for s in ss] for ss in ids]) return ids, scores def retrieve_and_score( self, query: torch.LongTensor ) -> Tuple[List[List[Document]], torch.Tensor]: """ Retrieve and score. For DPR, we encode query tokens and retrieve from FAISS index. :param query: query tokens :return (docs, scores): docs: list of (text, title) tuples for each batch example scores: doc scores """ query_enc = self.query_encoder(query) top_doc_ids_tensor, top_doc_scores = self.index_retrieve(query_enc, self.n_docs) top_docs, top_doc_ids = [], [] for i in range(query.size(0)): ids_i = [] docs_i = [] for int_id in top_doc_ids_tensor[i]: doc_id = str(int_id.item()) passage = self.passages[doc_id] ids_i.append(doc_id) docs_i.append(Document(title=passage[1], text=passage[0], docid=doc_id)) top_docs.append(docs_i) top_doc_ids.append(ids_i) return top_docs, top_doc_scores class TFIDFRetriever(RagRetriever): """ Use TFIDF to retrieve wikipedia documents. """ def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared = None): """ Init a TFIDFRetrieverAgent. 
""" opt['query_model'] = 'tfidf' super().__init__(opt, dictionary, shared=shared) tfidf_opt = { 'model': 'rag_tfidf_retriever', 'model_file': (opt['tfidf_model_path']), 'tfidf_model_path': opt['tfidf_model_path'], 'retriever_num_retrieved': opt['n_docs'], 'retriever_mode': 'keys', 'override': {'model': 'rag_tfidf_retriever', 'remove_title': False}, } self.n_docs = opt['n_docs'] self.max_doc_paragraphs = opt['tfidf_max_doc_paragraphs'] assert self.max_doc_paragraphs != 0 if not shared: self.tfidf_retriever = create_agent(tfidf_opt) self.query_encoder = DprQueryEncoder( opt, dpr_model=opt['query_model'], pretrained_path=opt['dpr_model_file'] ) else: self.tfidf_retriever = shared['tfidf_retriever'] self.query_encoder = shared['query_encoder'] def share(self) -> TShared: shared = super().share() shared['tfidf_retriever'] = self.tfidf_retriever shared['query_encoder'] = self.query_encoder return shared def retrieve_and_score( self, query: torch.LongTensor ) -> Tuple[List[List[Document]], Union[torch.Tensor, List[torch.Tensor]]]: """ Retrieve and score using TFIDF. 
:param query: query tokens :return (docs, scores): docs: list of (text, title) tuples for each batch example scores: doc scores """ def _build_doc(idx, cand): title = cand.split('\n\n')[0] paragraphs = cand.split('\n\n')[1:] if self.max_doc_paragraphs > 0: paragraphs = paragraphs[: self.max_doc_paragraphs] return Document(title=title, text=' '.join(paragraphs), docid=ids_i[idx]) docs = [] scores = [] for q in query: query_text = self._tokenizer.decode(q) self.tfidf_retriever.observe({'text': query_text, 'episode_done': True}) act = self.tfidf_retriever.act() if 'candidate_scores' not in act: scores_i = [0] * self.n_docs docs_i = [BLANK_DOC] * self.n_docs else: scores_i = act['candidate_scores'] candidate_docs = act['text_candidates'] ids_i = act['candidate_ids'] candidate_docs = [ _build_doc(j, c) for j, c in enumerate(act['text_candidates']) ] docs_i = candidate_docs[: self.n_docs] scores_i = scores_i[: self.n_docs] if len(docs_i) < self.n_docs: # Something went wrong with TFIDF here; need to add null docs logging.warning( f'Ex has less than {self.n_docs} TFIDF docs: {len(docs_i)}' ) num_null = self.n_docs - len(docs_i) docs_i += [BLANK_DOC] * num_null scores_i = np.append(scores_i, [0] * num_null) docs.append(docs_i) scores.append(torch.FloatTensor(scores_i).to(query.device)) scores = torch.stack(scores) return docs, scores class DPRThenTorchReranker(RagRetrieverReranker, DPRRetriever, ABC): """ Base Class for DPR --> TorchRanker Retrievers. Handles some shared functionality. """ def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared = None): """ Initialize DPR model. It is up to subclasses to initialize rerankers. 
""" RagRetrieverReranker.__init__(self, opt, dictionary, shared=shared) self.dpr_num_docs = opt['dpr_num_docs'] assert self.dpr_num_docs dpr_opt = copy.deepcopy(opt) dpr_opt['n_docs'] = self.dpr_num_docs DPRRetriever.__init__(self, dpr_opt, dictionary, shared=shared) def get_reranker_opts(self, opt: Opt) -> Dict[str, Any]: """ Provide options used when building the rerankers. Base class ensures that various optimizations (cuda, fp16, parallel) are accounted for. :param opt: base opt :return options_dict: return a dictionary mapping options to values. """ return { 'no_cuda': opt['no_cuda'], 'fp16': opt['fp16'], 'model_parallel': opt['model_parallel'], 'data_parallel': opt['data_parallel'], } def _build_reranker( self, opt: Opt ) -> Tuple[torch.nn.Module, RagRetrieverTokenizer]: """ Builds reranker. :param opt: original opt :return (module, dict) module: the model from the agent created via the options dict: A RagRetrieverTokenizer, dictionary for the created model. """ rerank_opt = copy.deepcopy(opt) rerank_opt = {**TRANSFORMER_RANKER_BASE_OPT, **self.get_reranker_opts(opt)} logging.disable() agent = create_agent(rerank_opt) logging.enable() assert isinstance(agent, TorchRankerAgent) return ( agent.model, RagRetrieverTokenizer(opt['datapath'], '', agent.dict, max_length=360), ) def _retrieve_initial( self, query: torch.LongTensor ) -> Tuple[List[List[Document]], torch.Tensor]: """ Initial DPR retrieval. Just call superclass to retrieve first stage. :param query: encoding of query :param mask: optional query mask :return (docs, scores): docs: list of (text, title) tuples for each batch example scores: doc scores """ return DPRRetriever.retrieve_and_score(self, query) class DPRThenPolyRetriever(DPRThenTorchReranker): """ 2 Stage Retrieval with DPR and Poly-encoder. 1. Retrieve N Docs with DPR 2. Rescore docs with polyencoder """ def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared = None): """ Initialize a Poly-Encoder Agent. """ # 1. 
Call super to init DPR super().__init__(opt, dictionary, shared=shared) # 2. Poly-encoder self.polyencoder, self.poly_tokenizer = self._build_reranker(opt) self.register_parameter( "poly_score_lambda", torch.nn.Parameter(torch.tensor([float(opt['poly_score_initial_lambda'])])), ) def _get_new_score_lambda(self) -> torch.nn.Parameter: """ Return the lambda used for computing the new score. """ return self.poly_score_lambda # type: ignore def get_reranker_opts(self, opt: Opt) -> Dict[str, Any]: """ Provide options used when building the polyencoder. :param opt: base opt :return options_dict: return a dictionary mapping options to values. """ from parlai.agents.rag.args import PRETRAINED_RANKER_TYPES init_path = opt['polyencoder_init_model'] if init_path in PRETRAINED_RANKER_TYPES: init_model = f"zoo:pretrained_transformers/poly_model_huge_{opt["polyencoder_init_model"]}/model" dict_file = f"zoo:pretrained_transformers/poly_model_huge_{opt["polyencoder_init_model"]}/model.dict" else: assert os.path.exists(init_path) init_model = init_path dict_file = f'{init_path}.dict' return { 'model': 'transformer/polyencoder', 'init_model': init_model, 'dict_file': dict_file, # necessary opt args 'multitask_weights': [1], **{k: opt[k] for k in POLYENCODER_OPT_KEYS}, **super().get_reranker_opts(opt), } def _rescore( self, query: torch.LongTensor, docs: List[List[Document]] ) -> torch.Tensor: """ Compute Poly-encoder score with initial set of Documents. Scoring taken from PolyencoderAgent.score_candidates :param query: query tokens, used in DPR retrieval. :param docs: List of initially retrieved top docs for each batch example :return new_scores: return scored documents. 
""" poly_query_vec = self.vectorize_texts( [self._tokenizer.decode(q) for q in query], self.poly_tokenizer, self.max_query_len, ).to(query.device) doc_vecs = torch.stack( [ self.vectorize_texts( [d.get_tokenization_str() for d in docs_i], self.poly_tokenizer, self.max_doc_len, ) for docs_i in docs ] ).to(query.device) ctxt_rep, ctxt_rep_mask, _ = self.polyencoder(ctxt_tokens=poly_query_vec) _, _, cand_rep = self.polyencoder(cand_tokens=doc_vecs) scores = self.polyencoder( ctxt_rep=ctxt_rep, ctxt_rep_mask=ctxt_rep_mask, cand_rep=cand_rep ) return scores class PolyFaissRetriever(DPRThenPolyRetriever): """ Poly-encoder Retriever, using FAISS. Performs FAISS retrieval to retrieve N initial docs; re-ranks according to Poly- encoder score to narrow down to K docs. """ def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared = None): assert opt['query_model'] == 'dropout_poly' super().__init__(opt, dictionary, shared=shared) self.dropout_poly = RagDropoutPolyWrapper(opt) self.polyencoder = self.dropout_poly.model self.poly_tokenizer = RagRetrieverTokenizer( opt['datapath'], opt['query_model'], self.dropout_poly.dict, max_length=360 ) model = ( self.polyencoder.module if hasattr(self.polyencoder, 'module') else self.polyencoder ) for param in model.encoder_cand.parameters(): # type: ignore # freeze document encoding for PolyFAISS. param.requires_grad = False @register_agent("rag_tfidf_retriever") class RagTfidfRetrieverAgent(TfidfRetrieverAgent): """ Wrapper around TFIDF Retriever to cache retrieved documents. """ def __init__(self, opt: Opt, shared: TShared = None): super().__init__(opt, shared) if not shared: self.docid_to_text = {} else: self.docid_to_text = shared.get('docid_to_text', {}) def share(self) -> TShared: shared = super().share() shared['docid_to_text'] = self.docid_to_text return shared def doc2txt(self, docid): """ Cache document texts during train/eval. 
""" if docid not in self.docid_to_text: text = super().doc2txt(docid) self.docid_to_text[docid] = text else: text = self.docid_to_text[docid] return text BLANK_SEARCH_DOC = {'url': None, 'content': '', 'title': ''} NO_SEARCH_QUERY = 'no_passages_used' class SearchQueryRetriever(RagRetriever): def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared): RagRetriever.__init__(self, opt, dictionary, shared=shared) opt['skip_retrieval_token'] = NO_SEARCH_QUERY self.n_docs = opt['n_docs'] self.len_chunk = opt['splitted_chunk_length'] self.doc_chunk_split_mode = opt['doc_chunk_split_mode'] n_doc_chunks = opt['n_ranked_doc_chunks'] chunk_ranker_type = opt['doc_chunks_ranker'] if chunk_ranker_type == 'tfidf': self.chunk_reranker = TfidfChunkRanker(n_doc_chunks) elif chunk_ranker_type == 'head': self.chunk_reranker = HeadChunkRanker(n_doc_chunks) else: self.chunk_reranker = RetrievedChunkRanker( n_doc_chunks, opt['woi_doc_chunk_size'] ) if not shared: self.query_generator = self.init_search_query_generator(opt) else: self.query_generator = shared['query_generator'] self.dict = dictionary self.init_query_encoder(opt) def share(self) -> TShared: shared = super().share() shared['query_generator'] = self.query_generator return shared def init_search_query_generator(self, opt) -> TorchGeneratorAgent: model_file = opt['search_query_generator_model_file'] logging.info('Loading search generator model') logging.disable() search_query_gen_agent = create_agent_from_model_file( model_file, opt_overrides={ 'skip_generation': False, 'inference': opt['search_query_generator_inference'], 'beam_min_length': opt['search_query_generator_beam_min_length'], 'beam_size': opt['search_query_generator_beam_size'], 'text_truncate': opt['search_query_generator_text_truncate'], }, ) logging.enable() logging.info('Search query generator model loading completed!') return search_query_gen_agent def generate_search_query(self, query: torch.LongTensor) -> List[str]: """ Generates a list of 
queries for the encoded query (context) tensor. """ texts = [self._tokenizer.decode(q) for q in query] obs_list = [] for t in texts: msg = Message({'text': t, 'episode_done': True}) obs_list.append(self.query_generator.observe(msg)) self.query_generator.reset() # Erase the history search_quries = [r['text'] for r in self.query_generator.batch_act(obs_list)] logging.debug(f'Generated search queries {search_quries}') return search_quries def init_query_encoder(self, opt): if hasattr(self, 'query_encoder'): # It is already instantiated return self.query_encoder = DprQueryEncoder( opt, dpr_model=opt['query_model'], pretrained_path=opt['dpr_model_file'] ) def text2tokens(self, txt: str) -> Union[List[str], List[int]]: if self.doc_chunk_split_mode == 'word': return txt.split(' ') else: return self.dict.txt2vec(txt) def tokens2text(self, tokens: Union[List[int], List[str]]) -> str: if self.doc_chunk_split_mode == 'word': return ' '.join(tokens) else: return self.dict.vec2txt(tokens) def pick_chunk(self, query: str, doc_title: str, doc_text: str, doc_url: str): """ Splits the document and returns the selected chunks. The number of returned chunks is controlled by `n_ranked_doc_chunks` in opt. The chunk selection is determined by `doc_chunks_ranker` in the opt. """ if not doc_text: # When there is no search query for the context return [("", 0)] tokens = self.text2tokens(doc_text) if self.opt['doc_chunks_ranker'] != 'woi_chunk_retrieved_docs': doc_chunks = [ self.tokens2text(tokens[i : i + self.len_chunk]) for i in range(0, len(tokens), self.len_chunk) ] else: doc_chunks = self.tokens2text(tokens) return self.chunk_reranker.get_top_chunks(query, doc_title, doc_chunks, doc_url) class SearchQuerySearchEngineRetriever(SearchQueryRetriever): """ A retriever that uses a search engine server for retrieving documents. It instantiates a `SearchEngineRetriever` object that in turns send search queries to an external server for retrieving documents. 
""" def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared): super().__init__(opt, dictionary, shared) if not shared: self.search_client = self.initiate_retriever_api(opt) else: self.search_client = shared['search_client'] def share(self) -> TShared: shared = super().share() shared['search_client'] = self.search_client return shared def initiate_retriever_api(self, opt) -> SearchEngineRetriever: logging.info('Creating the search engine retriever.') return SearchEngineRetriever(opt) def _empty_docs(self, num: int): """ Generates the requested number of empty documents. """ return [BLANK_SEARCH_DOC for _ in range(num)] def rank_score(self, rank_id: int): """ Scores the chunks of the retrieved document based on their rank. Note that this is the score for the retrieved document and applies to all its chunks. """ return 1 / (1 + rank_id) def _display_urls(self, search_results): """ Generates a string that lists retrieved URLs (document IDs). """ return '\n'.join([d['url'] for d in search_results if d['url']]) def retrieve_and_score( self, query: torch.LongTensor ) -> Tuple[List[List[Document]], torch.Tensor]: """ Retrieves relevant documents for the query (the conversation context). This method conducts three main steps that are flagged in the main code as well. Step 1: generate search queries for the conversation context batch.This step uses the query generator model (self.query_generator). Step 2: use the search client to retrieve documents.This step uses retrieval API agent (self.search_client) Step 3: generate the list of Document objects from the retrieved content. Here if the documents too long, the code splits them and chooses a chunk based on the selected `doc_chunks_ranker` in the opt. 
""" # step 1 search_queries = self.generate_search_query(query) # step 2 search_results_batch = self.search_client.retrieve(search_queries, self.n_docs) # step 3 top_docs = [] top_doc_scores = [] max_n_docs: int = self.n_docs for sq, search_results in zip(search_queries, search_results_batch): if not search_results: search_results = self._empty_docs(self.n_docs) elif len(search_results) < self.n_docs: remain_docs = self.n_docs - len(search_results) search_results.extend(self._empty_docs(remain_docs)) docs_i = [] scors_i = [] # Change this debug later logging.debug(f'URLS:\n{self._display_urls(search_results)}') for i, doc in enumerate(search_results): url = doc['url'] title = doc['title'] dcontent = doc['content'] assert type(dcontent) in ( str, list, ), f'Unrecognized retrieved doc: {dcontent}' full_text = ( dcontent if isinstance(dcontent, str) else '\n'.join(doc['content']) ) doc_chunks = [ dc[0] for dc in self.pick_chunk(sq, title, full_text, url) ] for splt_id, splt_content in enumerate(doc_chunks): docs_i.append( Document( docid=url, text=splt_content, title=f'{title}_{splt_id}' ) ) scors_i.append(self.rank_score(i)) max_n_docs = max(max_n_docs, len(docs_i)) top_docs.append(docs_i) top_doc_scores.append(scors_i) # Pad with empty docs for i in range(len(top_docs)): n_empty = max_n_docs - len(top_docs[i]) if n_empty: top_docs[i] = top_docs[i] + [BLANK_DOC] * n_empty top_doc_scores[i] = top_doc_scores[i] + [0] * n_empty self.top_docs = top_docs return top_docs, torch.Tensor(top_doc_scores).to(query.device) class SearchQueryFAISSIndexRetriever(SearchQueryRetriever, DPRRetriever): def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared): SearchQueryRetriever.__init__(self, opt, dictionary, shared=shared) self.load_index(opt, shared) def share(self) -> TShared: shared = SearchQueryRetriever.share(self) shared.update(DPRRetriever.share(self)) return shared def retrieve_and_score( self, query: torch.LongTensor ) -> Tuple[List[List[Document]], torch.Tensor]: 
""" Retrieves from the FAISS index using a search query. This methods relies on the `retrieve_and_score` method in `RagRetriever` ancestor class. It receive the query (conversation context) and generatess the search term queries based on them. Then uses those search quries (instead of the the query text itself) to retrieve from the FAISS index. """ search_queries = self.generate_search_query(query) tokenized_search_queries, _ = padded_tensor( [self._tokenizer.encode(sq) for sq in search_queries] ) top_docs, top_doc_scores = DPRRetriever.retrieve_and_score( self, tokenized_search_queries.to(query.device) ) for query_id in range(len(top_docs)): if search_queries[query_id] == NO_SEARCH_QUERY: top_docs[query_id] = [BLANK_DOC for _ in range(self.n_docs)] return top_docs, top_doc_scores class ObservationEchoRetriever(RagRetriever): """ This retriever returns (echos) documents that are already passed to it to return. Use this only with GoldFiD agents. It relies on the retrieved docs being included in the observed example of the agent. 
""" def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared = None): self._delimiter = '\n' self.n_docs = opt['n_docs'] self._query_ids = dict() self._saved_docs = dict() self._largest_seen_idx = -1 super().__init__(opt, dictionary, shared=shared) def add_retrieve_doc(self, query: str, retrieved_docs: List[Document]): self._largest_seen_idx += 1 new_idx = self._largest_seen_idx if new_idx in self._query_ids.values() or new_idx in self._saved_docs: raise RuntimeError( "Nonunique new_idx created in add_retrieve_doc in ObservationEchoRetriever \n" "this might return the same set of docs for two distinct queries" ) self._query_ids[query] = new_idx self._saved_docs[new_idx] = retrieved_docs or [ BLANK_DOC for _ in range(self.n_docs) ] def tokenize_query(self, query: str) -> List[int]: return [self._query_ids[query]] def get_delimiter(self) -> str: return self._delimiter def _clear_mapping(self): self._query_ids = dict() self._saved_docs = dict() self._largest_seen_idx = -1 def retrieve_and_score( self, query: torch.LongTensor ) -> Tuple[List[List[Document]], torch.Tensor]: batch_size = query.size(0) retrieved_docs = [] for endoded_query in query.tolist(): docs_retrieve_idx = endoded_query[0] retrieved_docs.append(self._saved_docs[docs_retrieve_idx]) # Some arbitrary scoring of docs max_num_docs = max([len(rtds) for rtds in retrieved_docs]) retrieved_doc_scores = torch.Tensor([1 / (1 + i) for i in range(max_num_docs)]) retrieved_doc_scores = retrieved_doc_scores.repeat(batch_size, 1).to( query.device ) # empty the 2 mappings after each retrieval self._clear_mapping() return retrieved_docs, retrieved_doc_scores class DocumentChunkRanker: """ Base class for controlling splitting long documents and selecting relevant chunks. 
""" def __init__(self, n_retrieved_chunks): self.n_ret_chunks = n_retrieved_chunks @abstractmethod def get_top_chunks( self, query: str, doc_title: str, doc_chunks: Union[List[str], str], doc_url: str, ): """ Ranks documents (chunk) based on their relevance to `query` """ class HeadChunkRanker(DocumentChunkRanker): """ Returns the head chunks only. """ def get_top_chunks( self, query: str, doc_title: str, doc_chunks: Union[List[str], str], doc_url: str, ): """ Return chunks in doc-present order. """ return [(c,) for c in doc_chunks[: self.n_ret_chunks]] class RetrievedChunkRanker(DocumentChunkRanker): """ Utilize retrieved doc chunk mutator. """ def __init__(self, n_retrieved_chunks, chunk_size: int = 500): super().__init__(n_retrieved_chunks) self.chunk_size = chunk_size def get_top_chunks( self, query: str, doc_title: str, doc_chunks: Union[List[str], str], doc_url: str, ): """ Return chunks according to the woi_chunk_retrieved_docs_mutator """ if isinstance(doc_chunks, list): docs = ''.join(doc_chunks) else: assert isinstance(doc_chunks, str) docs = doc_chunks chunks = chunk_docs_in_message( Message( { CONST.RETRIEVED_DOCS: [docs], CONST.RETRIEVED_DOCS_TITLES: [doc_title], CONST.RETRIEVED_DOCS_URLS: [doc_url], CONST.SELECTED_SENTENCES: [CONST.NO_SELECTED_SENTENCES_TOKEN], } ), self.chunk_size, )[CONST.RETRIEVED_DOCS] return [(c,) for c in chunks[: self.n_ret_chunks]] class TfidfChunkRanker(DocumentChunkRanker): """ Uses TF-IDF to compare chunks to the original search query. 
""" def __init__(self, n_retrieved_chunks): super().__init__(n_retrieved_chunks) self._vectorizer = TfidfVectorizer() def get_top_chunks( self, query: str, doc_title: str, doc_chunks: Union[List[str], str], doc_url: str, ): assert isinstance(doc_chunks, list) vectorized_corpus = self._vectorizer.fit_transform(doc_chunks + [query]) docs_vec = vectorized_corpus[:-1, :] q_vec = vectorized_corpus[-1, :] scores = np.hstack((q_vec * docs_vec.transpose()).toarray()) top_chunk_ids = np.argsort(-scores)[: self.n_ret_chunks] return [(doc_chunks[i], scores[i]) for i in top_chunk_ids] def retriever_factory( opt: Opt, dictionary: DictionaryAgent, shared=None ) -> Optional[RagRetriever]: """ Build retriever. :param opt: ParlAI Opt :param dictionary: dictionary agent :param shared: shared objects. :return retriever: return a retriever for RAG. """ if opt.get('converting'): return None # only build retriever when not converting a BART model retriever = RetrieverType(opt['rag_retriever_type']) if retriever is RetrieverType.DPR: return DPRRetriever(opt, dictionary, shared=shared) elif retriever is RetrieverType.TFIDF: return TFIDFRetriever(opt, dictionary, shared=shared) elif retriever is RetrieverType.DPR_THEN_POLY: return DPRThenPolyRetriever(opt, dictionary, shared=shared) elif retriever is RetrieverType.POLY_FAISS: return PolyFaissRetriever(opt, dictionary, shared=shared) elif retriever is RetrieverType.SEARCH_ENGINE: return SearchQuerySearchEngineRetriever(opt, dictionary, shared=shared) elif retriever is RetrieverType.SEARCH_TERM_FAISS: return SearchQueryFAISSIndexRetriever(opt, dictionary, shared=shared) elif retriever is RetrieverType.OBSERVATION_ECHO_RETRIEVER: return ObservationEchoRetriever(opt, dictionary, shared=shared)
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Retrievers for RAG. """ from abc import ABC, abstractmethod import copy import csv import gzip import numpy as np import os from parlai.core.message import Message import torch import torch.cuda import torch.nn import transformers from tqdm import tqdm try: from transformers import BertTokenizerFast as BertTokenizer except ImportError: from transformers import BertTokenizer from typing import Tuple, List, Dict, Union, Optional, Any from typing_extensions import final from sklearn.feature_extraction.text import TfidfVectorizer from parlai.agents.tfidf_retriever.tfidf_retriever import TfidfRetrieverAgent from parlai.core.agents import create_agent, create_agent_from_model_file from parlai.core.build_data import modelzoo_path from parlai.core.dict import DictionaryAgent from parlai.core.loader import register_agent from parlai.core.opt import Opt from parlai.core.torch_generator_agent import TorchGeneratorAgent from parlai.core.torch_ranker_agent import TorchRankerAgent from parlai.tasks.wizard_of_internet.mutators import chunk_docs_in_message import parlai.tasks.wizard_of_internet.constants as CONST import parlai.utils.logging as logging from parlai.utils.torch import padded_tensor from parlai.utils.typing import TShared from parlai.utils.io import PathManager from parlai.agents.rag.dpr import DprQueryEncoder from parlai.agents.rag.polyfaiss import RagDropoutPolyWrapper from parlai.agents.rag.indexers import DenseHNSWFlatIndexer, indexer_factory from parlai.agents.rag.args import ( RetrieverType, WOW_INDEX_PATH, WOW_PASSAGES_PATH, POLYENCODER_OPT_KEYS, TRANSFORMER_RANKER_BASE_OPT, WOW_COMPRESSED_INDEX_PATH, ) from parlai.agents.rag.retrieve_api import SearchEngineRetriever def load_passage_reader( ctx_file: str, return_dict: bool = True ) -> Union[Dict[str, Tuple[str, 
str]], List[Tuple[str, str, str]]]: """ Load passages from file, corresponding to a FAISS index. We attempt to read the passages with a csv reader. If passage files are not saved correctly with a csv reader, reads can fail. :param ctxt_file: file to read :return reader: return a reader over the passages """ logging.info(f'Reading data from: {ctx_file}') f_open = gzip.open if ctx_file.endswith(".gz") else open try: passages = {} if return_dict else [] with f_open(ctx_file) as tsvfile: _reader = csv.reader(tsvfile, delimiter='\t') # type: ignore ids = [] for idx, row in tqdm(enumerate(_reader)): if idx == 0: assert row[0] == 'id' ids.append(-1) elif idx <= 1: ids.append(row[0]) if return_dict: passages[row[0]] = (row[1], row[2]) # type: ignore else: passages.append((row[0], row[1], row[2])) # type: ignore continue else: assert int(row[0]) == int(ids[idx - 1]) + 1, "invalid load" if return_dict: passages[row[0]] = (row[1], row[2]) # type: ignore else: passages.append((row[0], row[1], row[2])) # type: ignore ids.append(row[0]) del ids except (csv.Error, AssertionError) as e: passages = {} if return_dict else [] logging.error(f'Exception: {e}') logging.warning('Error in loading csv; loading via readlines') with f_open(ctx_file) as tsvfile: for idx, l in tqdm(enumerate(tsvfile.readlines())): line = l.replace('\n', '').split('\t') # type: ignore assert len(line) == 3 if idx == 0: assert line[0] == 'id' if line[0] != 'id': if return_dict: passages[line[0]] = (line[1], line[2]) # type: ignore else: passages.append((line[0], line[1], line[2])) # type: ignore return passages def load_passages_dict(ctx_file: str) -> Dict[str, Tuple[str, str]]: """ Load passages as a dict. 
    :param ctx_file:
        file to read

    :return passages_dict:
        return a dict mapping passage id to a tuple of (text, title)
    """
    psgs_dict = load_passage_reader(ctx_file, return_dict=True)
    assert isinstance(psgs_dict, dict)
    return psgs_dict


def load_passages_list(ctx_file: str) -> List[Tuple[str, str, str]]:
    """
    Load passages as a list.

    :param ctx_file:
        file to read

    :return passages_list:
        return a list of 3-tuples (id, text, title)
    """
    psgs_list = load_passage_reader(ctx_file, return_dict=False)
    assert isinstance(psgs_list, list)
    return psgs_list


class Document:
    """
    A Document used in retrieval.

    Holds a title, a text body, and a (stringified) document id.
    """

    # Separators used when rendering the document as a single string.
    TITLE_DELIM = ' / '
    PASSAGE_DELIM = ' // '

    def __init__(self, title: str, text: str, docid: Union[int, str]):
        assert all(isinstance(t, str) for t in [title, text])
        self._title = title
        self._text = text
        # ids are normalized to str regardless of input type.
        self._id = str(docid)

    def get_title(self) -> str:
        return self._title

    def get_text(self) -> str:
        return self._text

    def get_id(self) -> str:
        return self._id

    def __repr__(self):
        return f"ID: {self._id}\nTitle: {self._title}\nText: {self._text}"

    def __str__(self):
        return f"{self._title} | {self._text}"

    def get_passage_str(self):
        # Rendering with a trailing passage delimiter, for concatenation.
        return f"{self._title.strip()}{self.TITLE_DELIM}{self._text.strip()}{self.PASSAGE_DELIM}"

    def get_tokenization_str(self):
        # Rendering without the trailing delimiter, for tokenization.
        return f"{self._title.strip()}{self.TITLE_DELIM}{self._text.strip()}"


# Sentinel empty document used to pad retrieval results.
BLANK_DOC = Document('', '', '')


def argsort_scores_and_docs(
    scores: torch.Tensor, docs: List[Document], n_docs: int
) -> Tuple[List[Document], torch.Tensor]:
    """
    Sort scores and documents by score, return n_docs ranked docs/scores.

    :param scores:
        scores with which to rank
    :param docs:
        docs to argsort
    :param n_docs:
        number of docs to return

    :return:
        (docs, scores) --> sorted documents, according to scores.
""" scores_sorter = scores.sort(descending=True) ranked_docs = [docs[idx] for idx in scores_sorter.indices[:n_docs]] ranked_scores = scores_sorter.values[:n_docs] return ranked_docs, ranked_scores def clean_vec( vec: torch.LongTensor, end_idx: int, special_toks: List[int] = None ) -> List[int]: """ Remove special tokens from a tensor prior to text conversion. """ new_vec = [] for i in vec: if i == end_idx: break elif special_toks and i in special_toks: continue new_vec.append(i) return new_vec class RagRetrieverTokenizer: """ Wrapper for various tokenizers used by RAG Query Model. """ VOCAB_PATH = 'vocab.txt' def __init__( self, datapath: str, query_model: str, dictionary: DictionaryAgent, max_length: int = 256, delimiter='\n', ): """ :param query_model: query model type (e.g. bert) :param dictionary: ParlAI dictionary agent :param fast: whether to instantiate fast BertTokenizer :param max_length: maximum length of encoding. """ self.datapath = datapath self.query_model = query_model self.tokenizer = self._init_tokenizer(dictionary) self.max_length = max_length self._delimiter = delimiter def _init_tokenizer( self, dictionary: DictionaryAgent ) -> Union[BertTokenizer, DictionaryAgent]: """ If a regular parlai model, use the regular dictionary. Otherwise, build as necessary :param dictionary: ParlAI dictionary agent """ if self.query_model in ['bert', 'bert_from_parlai_rag']: try: return BertTokenizer.from_pretrained('bert-base-uncased') except (ImportError, OSError): vocab_path = PathManager.get_local_path( os.path.join(self.datapath, "bert_base_uncased", self.VOCAB_PATH) ) return transformers.BertTokenizer.from_pretrained(vocab_path) else: return dictionary def get_pad_idx(self) -> int: """ Return pad token idx. """ if self.query_model in ['bert', 'bert_from_parlai_rag']: return self.tokenizer.pad_token_id else: return self.tokenizer[self.tokenizer.null_token] def get_delimiter(self) -> str: """ Return delimiter. 
""" return self._delimiter def get_bos_idx(self) -> int: """ Return start token idx. """ if self.query_model in ['bert', 'bert_from_parlai_rag']: return self.tokenizer.bos_token_id or 1 else: return self.tokenizer[self.tokenizer.start_token] def get_eos_idx(self) -> int: """ Return start token idx. """ if self.query_model in ['bert', 'bert_from_parlai_rag']: return self.tokenizer.eos_token_id or 2 else: return self.tokenizer[self.tokenizer.end_token] def encode(self, txt: str, txt_pair: Optional[str] = None) -> List[int]: """ Encode text. :param txt: text to encode :param txt_pair: Optional additional text to encode. Useful if encoding two parts of a text, e.g. title & text. :return encoding: return encoded text. """ if self.query_model in ['bert', 'bert_from_parlai_rag']: txt = txt.lower().strip() if txt_pair: txt_pair = txt_pair.lower().strip() return self.tokenizer.encode( txt, text_pair=txt_pair, add_special_tokens=True, max_length=self.max_length, pad_to_max_length=False, truncation='longest_first', ) else: return self.tokenizer.txt2vec(txt) def decode(self, vec: torch.LongTensor) -> str: """ Decode a token vector into a string. """ if self.query_model in ['bert', 'bert_from_parlai_rag']: return self.tokenizer.decode( clean_vec(vec, self.get_eos_idx()), skip_special_tokens=True ) else: return self.tokenizer.vec2txt( clean_vec( vec, self.get_eos_idx(), special_toks=[ self.get_pad_idx(), self.get_bos_idx(), self.get_eos_idx(), ], ) ) class RagRetriever(torch.nn.Module, ABC): """ RAG Retriever. Provides an interface to the RagModel for retrieving documents. 
""" def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared = None): super().__init__() self.retriever_type = RetrieverType(opt['rag_retriever_type']) if not ( ( self.retriever_type in ( RetrieverType.SEARCH_ENGINE, RetrieverType.OBSERVATION_ECHO_RETRIEVER, ) ) or (opt.get('retriever_debug_index') in [None, 'none']) ): if opt.get('retriever_debug_index') == 'exact': opt['path_to_index'] = WOW_INDEX_PATH else: opt['path_to_index'] = WOW_COMPRESSED_INDEX_PATH opt['path_to_dpr_passages'] = WOW_PASSAGES_PATH self.opt = opt self.print_docs = opt.get('print_docs', False) self.max_doc_len = opt['max_doc_token_length'] self.max_query_len = opt['rag_query_truncate'] or 1024 self.end_idx = dictionary[dictionary.end_token] self._tokenizer = RagRetrieverTokenizer( datapath=opt['datapath'], query_model=opt['query_model'], dictionary=dictionary, delimiter=opt.get('delimiter', '\n') or '\n', ) self.fp16 = ( not opt['no_cuda'] and torch.cuda.is_available() and self.opt.get('fp16', False) ) @final def retrieve( self, query: torch.LongTensor ) -> Tuple[List[List[Document]], torch.Tensor]: """ Retrieve documents, given a query vector. :param query: tokenized query :return (docs, scores): docs: list of Documents for each batch example. scores: [bsz, n_docs] document scores """ docs, scores = self.retrieve_and_score(query) if self.print_docs: self.display_docs(docs) self.top_docs = [[str(d) for d in ds] for ds in docs] return docs, scores @abstractmethod def retrieve_and_score( self, query: torch.LongTensor ) -> Tuple[List[List[Document]], torch.Tensor]: """ Retrieve documents for a given query. :param query: tokenized query :return (docs, scores): docs: list of Documents for each batch example. scores: [bsz, n_docs] document scores """ def tokenize_query(self, query: str) -> List[int]: """ Tokenize the query. 
:param query: query to tokenize :return tokenized_query: return list of tokens """ return self._tokenizer.encode(query) def vectorize_texts( self, input_text: List[str], tokenizer: RagRetrieverTokenizer, max_len: Optional[int] = None, ) -> torch.LongTensor: """ Vectorize a set of input texts with an arbitrary RagRetrieverTokenizer. :param input_text: list of input strings :param tokenizer: tokenizer that encodes the input strings :param max_len: max length to tokenize :return vecs: returns a stacked padded tensor of tokens. """ vecs = [tokenizer.encode(q) for q in input_text] if max_len: vecs = [v[:max_len] for v in vecs] vecs, _ = padded_tensor( vecs, fp16friendly=self.fp16, pad_idx=tokenizer.get_pad_idx(), max_len=max_len, ) return vecs def get_delimiter(self) -> str: """ Return the tokenizer's delimiter. """ return self._tokenizer.get_delimiter() def display_docs(self, top_docs: List[List[Document]]): """ Prints documents. :param top_docs: list of documents for each batch item """ for docs in top_docs: for rank, doc in enumerate(docs): print(f"Rank: {rank}\n{doc}") def share(self) -> TShared: """ Share retriever stuff. Share anything that can be handily used by other retrievers. This is primarily to share things that take up substantial RAM (indices, passages) """ return {} class RagRetrieverReranker(RagRetriever, ABC): """ Trait that carries methods for Reranker-based retrievers. """ def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared = None): super().__init__(opt, dictionary, shared=shared) self.n_final_docs = opt['n_docs'] @final def retrieve_and_score( self, query: torch.LongTensor ) -> Tuple[List[List[Document]], torch.Tensor]: """ Perform two-stage retrieval; rescore initial set of docs. :param query: query tokens :return (docs, scores): docs: list of Documents for each batch example scores: doc scores """ # 1. 
Get Initial documents initial_docs, initial_scores = self._retrieve_initial(query) new_scores = self._rescore(query, initial_docs) # 2. Get new scores final_docs: List[List[Document]] = [] final_scores: List[torch.Tensor] = [] new_score_lambda = self._get_new_score_lambda() for i in range(len(initial_docs)): docs_i = initial_docs[i] initial_scores_i = initial_scores[i] scores_i = torch.mul(initial_scores_i, (1 - new_score_lambda)) + torch.mul( new_scores[i], new_score_lambda ) docs_i, scores_i = argsort_scores_and_docs( scores_i, docs_i, self.n_final_docs ) final_docs.append(docs_i) final_scores.append(scores_i) return final_docs, torch.stack(final_scores) @abstractmethod def _retrieve_initial( self, query: torch.LongTensor ) -> Tuple[List[List[Document]], torch.Tensor]: """ Perform initial stage of retrieval. :param query: tokenized query :return (docs, scores): docs: list of Documents for each batch example scores: doc scores """ @abstractmethod def _rescore( self, query: torch.LongTensor, docs: List[List[Document]] ) -> torch.Tensor: """ Rescore retrieved documents. :param query: tokenized query :param docs: List of initially retrieved top docs for each batch example :return scores: return new doc scores. """ @abstractmethod def _get_new_score_lambda(self) -> torch.nn.Parameter: """ Return the lambda used for computing the new score. """ class DPRRetriever(RagRetriever): """ DPR Retriever. """ def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared=None): """ Initialize DPR Retriever. 
""" super().__init__(opt, dictionary, shared=shared) self.load_index(opt, shared) self.n_docs = opt['n_docs'] self.query_encoder = DprQueryEncoder( opt, dpr_model=opt['query_model'], pretrained_path=opt['dpr_model_file'] ) def load_index(self, opt, shared): if not shared: self.indexer = indexer_factory(opt) index_path = modelzoo_path(opt['datapath'], opt['path_to_index']) passages_path = modelzoo_path(opt['datapath'], opt['path_to_dpr_passages']) embeddings_path = None if opt['path_to_dense_embeddings'] is not None: embeddings_path = modelzoo_path( opt['datapath'], opt['path_to_dense_embeddings'] ) self.indexer.deserialize_from(index_path, embeddings_path) self.passages = load_passages_dict(passages_path) elif shared: self.indexer = shared['indexer'] self.passages = shared['passages'] def share(self) -> TShared: """ Share FAISS retriever and passages. """ shared = super().share() shared['indexer'] = self.indexer shared['passages'] = self.passages return shared def index_retrieve( self, query: torch.Tensor, n_docs: int ) -> Tuple[torch.Tensor, torch.Tensor]: """ Retrieve over FAISS index. :param query: bsz x embed_dim query tensor :param n_docs: number of docs to retrieve :return (ids, scores): ids: [bsz, n_docs] tensor of document IDs scores: [bsz, n_docs] tensor of document scores """ # retrieve docs and scores, reconstruct document embeddings & scores # NOTE: important that detach occurs _for retrieval only_, as we use the # query encodings to compute scores later in this function; if detached, # gradient will not flow to the query encoder. 
        top_docs_and_scores = self.indexer.search(
            query.cpu().detach().to(torch.float32).numpy(), n_docs
        )
        ids, np_vectors = zip(*top_docs_and_scores)
        vectors = torch.tensor(np.array(np_vectors)).to(query)
        if isinstance(self.indexer, DenseHNSWFlatIndexer):
            # NOTE(review): the last vector dimension appears to be an HNSW
            # auxiliary dimension and is dropped before scoring — confirm
            # against DenseHNSWFlatIndexer's embedding layout.
            vectors = vectors[:, :, :-1]
        # recompute exact FAISS scores (differentiable w.r.t. the query encoder,
        # since only the index search above used the detached copy)
        scores = torch.bmm(query.unsqueeze(1), vectors.transpose(1, 2)).squeeze(1)
        if torch.isnan(scores).sum().item():
            raise RuntimeError(
                '\n[ Document scores are NaN; please look into the built index. ]\n'
                '[ This generally happens if FAISS cannot separate vectors appropriately. ]\n'
                '[ If using a compressed index, try building an exact index: ]\n'
                '[ $ python index_dense_embeddings --indexer-type exact... ]'
            )
        ids = torch.tensor([[int(s) for s in ss] for ss in ids])

        return ids, scores

    def retrieve_and_score(
        self, query: torch.LongTensor
    ) -> Tuple[List[List[Document]], torch.Tensor]:
        """
        Retrieve and score.

        For DPR, we encode query tokens and retrieve from FAISS index.

        :param query:
            query tokens

        :return (docs, scores):
            docs: list of (text, title) tuples for each batch example
            scores: doc scores
        """
        query_enc = self.query_encoder(query)
        top_doc_ids_tensor, top_doc_scores = self.index_retrieve(query_enc, self.n_docs)
        top_docs, top_doc_ids = [], []
        for i in range(query.size(0)):
            ids_i = []
            docs_i = []
            for int_id in top_doc_ids_tensor[i]:
                doc_id = str(int_id.item())
                # passage tuple is (text, title); Document takes them in
                # (title, text) order.
                passage = self.passages[doc_id]

                ids_i.append(doc_id)
                docs_i.append(Document(title=passage[1], text=passage[0], docid=doc_id))
            top_docs.append(docs_i)
            top_doc_ids.append(ids_i)
        return top_docs, top_doc_scores


class TFIDFRetriever(RagRetriever):
    """
    Use TFIDF to retrieve wikipedia documents.
    """

    def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared = None):
        """
        Init a TFIDFRetrieverAgent.
""" opt['query_model'] = 'tfidf' super().__init__(opt, dictionary, shared=shared) tfidf_opt = { 'model': 'rag_tfidf_retriever', 'model_file': (opt['tfidf_model_path']), 'tfidf_model_path': opt['tfidf_model_path'], 'retriever_num_retrieved': opt['n_docs'], 'retriever_mode': 'keys', 'override': {'model': 'rag_tfidf_retriever', 'remove_title': False}, } self.n_docs = opt['n_docs'] self.max_doc_paragraphs = opt['tfidf_max_doc_paragraphs'] assert self.max_doc_paragraphs != 0 if not shared: self.tfidf_retriever = create_agent(tfidf_opt) self.query_encoder = DprQueryEncoder( opt, dpr_model=opt['query_model'], pretrained_path=opt['dpr_model_file'] ) else: self.tfidf_retriever = shared['tfidf_retriever'] self.query_encoder = shared['query_encoder'] def share(self) -> TShared: shared = super().share() shared['tfidf_retriever'] = self.tfidf_retriever shared['query_encoder'] = self.query_encoder return shared def retrieve_and_score( self, query: torch.LongTensor ) -> Tuple[List[List[Document]], Union[torch.Tensor, List[torch.Tensor]]]: """ Retrieve and score using TFIDF. 
:param query: query tokens :return (docs, scores): docs: list of (text, title) tuples for each batch example scores: doc scores """ def _build_doc(idx, cand): title = cand.split('\n\n')[0] paragraphs = cand.split('\n\n')[1:] if self.max_doc_paragraphs > 0: paragraphs = paragraphs[: self.max_doc_paragraphs] return Document(title=title, text=' '.join(paragraphs), docid=ids_i[idx]) docs = [] scores = [] for q in query: query_text = self._tokenizer.decode(q) self.tfidf_retriever.observe({'text': query_text, 'episode_done': True}) act = self.tfidf_retriever.act() if 'candidate_scores' not in act: scores_i = [0] * self.n_docs docs_i = [BLANK_DOC] * self.n_docs else: scores_i = act['candidate_scores'] candidate_docs = act['text_candidates'] ids_i = act['candidate_ids'] candidate_docs = [ _build_doc(j, c) for j, c in enumerate(act['text_candidates']) ] docs_i = candidate_docs[: self.n_docs] scores_i = scores_i[: self.n_docs] if len(docs_i) < self.n_docs: # Something went wrong with TFIDF here; need to add null docs logging.warning( f'Ex has less than {self.n_docs} TFIDF docs: {len(docs_i)}' ) num_null = self.n_docs - len(docs_i) docs_i += [BLANK_DOC] * num_null scores_i = np.append(scores_i, [0] * num_null) docs.append(docs_i) scores.append(torch.FloatTensor(scores_i).to(query.device)) scores = torch.stack(scores) return docs, scores class DPRThenTorchReranker(RagRetrieverReranker, DPRRetriever, ABC): """ Base Class for DPR --> TorchRanker Retrievers. Handles some shared functionality. """ def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared = None): """ Initialize DPR model. It is up to subclasses to initialize rerankers. 
""" RagRetrieverReranker.__init__(self, opt, dictionary, shared=shared) self.dpr_num_docs = opt['dpr_num_docs'] assert self.dpr_num_docs dpr_opt = copy.deepcopy(opt) dpr_opt['n_docs'] = self.dpr_num_docs DPRRetriever.__init__(self, dpr_opt, dictionary, shared=shared) def get_reranker_opts(self, opt: Opt) -> Dict[str, Any]: """ Provide options used when building the rerankers. Base class ensures that various optimizations (cuda, fp16, parallel) are accounted for. :param opt: base opt :return options_dict: return a dictionary mapping options to values. """ return { 'no_cuda': opt['no_cuda'], 'fp16': opt['fp16'], 'model_parallel': opt['model_parallel'], 'data_parallel': opt['data_parallel'], } def _build_reranker( self, opt: Opt ) -> Tuple[torch.nn.Module, RagRetrieverTokenizer]: """ Builds reranker. :param opt: original opt :return (module, dict) module: the model from the agent created via the options dict: A RagRetrieverTokenizer, dictionary for the created model. """ rerank_opt = copy.deepcopy(opt) rerank_opt = {**TRANSFORMER_RANKER_BASE_OPT, **self.get_reranker_opts(opt)} logging.disable() agent = create_agent(rerank_opt) logging.enable() assert isinstance(agent, TorchRankerAgent) return ( agent.model, RagRetrieverTokenizer(opt['datapath'], '', agent.dict, max_length=360), ) def _retrieve_initial( self, query: torch.LongTensor ) -> Tuple[List[List[Document]], torch.Tensor]: """ Initial DPR retrieval. Just call superclass to retrieve first stage. :param query: encoding of query :param mask: optional query mask :return (docs, scores): docs: list of (text, title) tuples for each batch example scores: doc scores """ return DPRRetriever.retrieve_and_score(self, query) class DPRThenPolyRetriever(DPRThenTorchReranker): """ 2 Stage Retrieval with DPR and Poly-encoder. 1. Retrieve N Docs with DPR 2. Rescore docs with polyencoder """ def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared = None): """ Initialize a Poly-Encoder Agent. """ # 1. 
Call super to init DPR super().__init__(opt, dictionary, shared=shared) # 2. Poly-encoder self.polyencoder, self.poly_tokenizer = self._build_reranker(opt) self.register_parameter( "poly_score_lambda", torch.nn.Parameter(torch.tensor([float(opt['poly_score_initial_lambda'])])), ) def _get_new_score_lambda(self) -> torch.nn.Parameter: """ Return the lambda used for computing the new score. """ return self.poly_score_lambda # type: ignore def get_reranker_opts(self, opt: Opt) -> Dict[str, Any]: """ Provide options used when building the polyencoder. :param opt: base opt :return options_dict: return a dictionary mapping options to values. """ from parlai.agents.rag.args import PRETRAINED_RANKER_TYPES init_path = opt['polyencoder_init_model'] if init_path in PRETRAINED_RANKER_TYPES: init_model = f"zoo:pretrained_transformers/poly_model_huge_{opt['polyencoder_init_model']}/model" dict_file = f"zoo:pretrained_transformers/poly_model_huge_{opt['polyencoder_init_model']}/model.dict" else: assert os.path.exists(init_path) init_model = init_path dict_file = f'{init_path}.dict' return { 'model': 'transformer/polyencoder', 'init_model': init_model, 'dict_file': dict_file, # necessary opt args 'multitask_weights': [1], **{k: opt[k] for k in POLYENCODER_OPT_KEYS}, **super().get_reranker_opts(opt), } def _rescore( self, query: torch.LongTensor, docs: List[List[Document]] ) -> torch.Tensor: """ Compute Poly-encoder score with initial set of Documents. Scoring taken from PolyencoderAgent.score_candidates :param query: query tokens, used in DPR retrieval. :param docs: List of initially retrieved top docs for each batch example :return new_scores: return scored documents. 
""" poly_query_vec = self.vectorize_texts( [self._tokenizer.decode(q) for q in query], self.poly_tokenizer, self.max_query_len, ).to(query.device) doc_vecs = torch.stack( [ self.vectorize_texts( [d.get_tokenization_str() for d in docs_i], self.poly_tokenizer, self.max_doc_len, ) for docs_i in docs ] ).to(query.device) ctxt_rep, ctxt_rep_mask, _ = self.polyencoder(ctxt_tokens=poly_query_vec) _, _, cand_rep = self.polyencoder(cand_tokens=doc_vecs) scores = self.polyencoder( ctxt_rep=ctxt_rep, ctxt_rep_mask=ctxt_rep_mask, cand_rep=cand_rep ) return scores class PolyFaissRetriever(DPRThenPolyRetriever): """ Poly-encoder Retriever, using FAISS. Performs FAISS retrieval to retrieve N initial docs; re-ranks according to Poly- encoder score to narrow down to K docs. """ def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared = None): assert opt['query_model'] == 'dropout_poly' super().__init__(opt, dictionary, shared=shared) self.dropout_poly = RagDropoutPolyWrapper(opt) self.polyencoder = self.dropout_poly.model self.poly_tokenizer = RagRetrieverTokenizer( opt['datapath'], opt['query_model'], self.dropout_poly.dict, max_length=360 ) model = ( self.polyencoder.module if hasattr(self.polyencoder, 'module') else self.polyencoder ) for param in model.encoder_cand.parameters(): # type: ignore # freeze document encoding for PolyFAISS. param.requires_grad = False @register_agent("rag_tfidf_retriever") class RagTfidfRetrieverAgent(TfidfRetrieverAgent): """ Wrapper around TFIDF Retriever to cache retrieved documents. """ def __init__(self, opt: Opt, shared: TShared = None): super().__init__(opt, shared) if not shared: self.docid_to_text = {} else: self.docid_to_text = shared.get('docid_to_text', {}) def share(self) -> TShared: shared = super().share() shared['docid_to_text'] = self.docid_to_text return shared def doc2txt(self, docid): """ Cache document texts during train/eval. 
""" if docid not in self.docid_to_text: text = super().doc2txt(docid) self.docid_to_text[docid] = text else: text = self.docid_to_text[docid] return text BLANK_SEARCH_DOC = {'url': None, 'content': '', 'title': ''} NO_SEARCH_QUERY = 'no_passages_used' class SearchQueryRetriever(RagRetriever): def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared): RagRetriever.__init__(self, opt, dictionary, shared=shared) opt['skip_retrieval_token'] = NO_SEARCH_QUERY self.n_docs = opt['n_docs'] self.len_chunk = opt['splitted_chunk_length'] self.doc_chunk_split_mode = opt['doc_chunk_split_mode'] n_doc_chunks = opt['n_ranked_doc_chunks'] chunk_ranker_type = opt['doc_chunks_ranker'] if chunk_ranker_type == 'tfidf': self.chunk_reranker = TfidfChunkRanker(n_doc_chunks) elif chunk_ranker_type == 'head': self.chunk_reranker = HeadChunkRanker(n_doc_chunks) else: self.chunk_reranker = RetrievedChunkRanker( n_doc_chunks, opt['woi_doc_chunk_size'] ) if not shared: self.query_generator = self.init_search_query_generator(opt) else: self.query_generator = shared['query_generator'] self.dict = dictionary self.init_query_encoder(opt) def share(self) -> TShared: shared = super().share() shared['query_generator'] = self.query_generator return shared def init_search_query_generator(self, opt) -> TorchGeneratorAgent: model_file = opt['search_query_generator_model_file'] logging.info('Loading search generator model') logging.disable() search_query_gen_agent = create_agent_from_model_file( model_file, opt_overrides={ 'skip_generation': False, 'inference': opt['search_query_generator_inference'], 'beam_min_length': opt['search_query_generator_beam_min_length'], 'beam_size': opt['search_query_generator_beam_size'], 'text_truncate': opt['search_query_generator_text_truncate'], }, ) logging.enable() logging.info('Search query generator model loading completed!') return search_query_gen_agent def generate_search_query(self, query: torch.LongTensor) -> List[str]: """ Generates a list of 
        queries for the encoded query (context) tensor.
        """
        texts = [self._tokenizer.decode(q) for q in query]
        obs_list = []
        for t in texts:
            msg = Message({'text': t, 'episode_done': True})
            obs_list.append(self.query_generator.observe(msg))
        self.query_generator.reset()  # Erase the history
        # Batch-generate one search query string per context.
        search_quries = [r['text'] for r in self.query_generator.batch_act(obs_list)]
        logging.debug(f'Generated search queries {search_quries}')
        return search_quries

    def init_query_encoder(self, opt):
        if hasattr(self, 'query_encoder'):
            # It is already instantiated
            return
        self.query_encoder = DprQueryEncoder(
            opt, dpr_model=opt['query_model'], pretrained_path=opt['dpr_model_file']
        )

    def text2tokens(self, txt: str) -> Union[List[str], List[int]]:
        # 'word' mode splits on spaces; otherwise use the dictionary's tokenizer.
        if self.doc_chunk_split_mode == 'word':
            return txt.split(' ')
        else:
            return self.dict.txt2vec(txt)

    def tokens2text(self, tokens: Union[List[int], List[str]]) -> str:
        # Inverse of text2tokens for the active split mode.
        if self.doc_chunk_split_mode == 'word':
            return ' '.join(tokens)
        else:
            return self.dict.vec2txt(tokens)

    def pick_chunk(self, query: str, doc_title: str, doc_text: str, doc_url: str):
        """
        Splits the document and returns the selected chunks.

        The number of returned chunks is controlled by `n_ranked_doc_chunks` in opt.
        The chunk selection is determined by `doc_chunks_ranker` in the opt.
        """
        if not doc_text:
            # When there is no search query for the context
            return [("", 0)]
        tokens = self.text2tokens(doc_text)
        if self.opt['doc_chunks_ranker'] != 'woi_chunk_retrieved_docs':
            # Fixed-length chunking; the ranker picks the best chunks.
            doc_chunks = [
                self.tokens2text(tokens[i : i + self.len_chunk])
                for i in range(0, len(tokens), self.len_chunk)
            ]
        else:
            # RetrievedChunkRanker does its own chunking; pass the whole text.
            doc_chunks = self.tokens2text(tokens)
        return self.chunk_reranker.get_top_chunks(query, doc_title, doc_chunks, doc_url)


class SearchQuerySearchEngineRetriever(SearchQueryRetriever):
    """
    A retriever that uses a search engine server for retrieving documents.

    It instantiates a `SearchEngineRetriever` object that in turn sends search queries
    to an external server for retrieving documents.
""" # step 1 search_queries = self.generate_search_query(query) # step 2 search_results_batch = self.search_client.retrieve(search_queries, self.n_docs) # step 3 top_docs = [] top_doc_scores = [] max_n_docs: int = self.n_docs for sq, search_results in zip(search_queries, search_results_batch): if not search_results: search_results = self._empty_docs(self.n_docs) elif len(search_results) < self.n_docs: remain_docs = self.n_docs - len(search_results) search_results.extend(self._empty_docs(remain_docs)) docs_i = [] scors_i = [] # Change this debug later logging.debug(f'URLS:\n{self._display_urls(search_results)}') for i, doc in enumerate(search_results): url = doc['url'] title = doc['title'] dcontent = doc['content'] assert type(dcontent) in ( str, list, ), f'Unrecognized retrieved doc: {dcontent}' full_text = ( dcontent if isinstance(dcontent, str) else '\n'.join(doc['content']) ) doc_chunks = [ dc[0] for dc in self.pick_chunk(sq, title, full_text, url) ] for splt_id, splt_content in enumerate(doc_chunks): docs_i.append( Document( docid=url, text=splt_content, title=f'{title}_{splt_id}' ) ) scors_i.append(self.rank_score(i)) max_n_docs = max(max_n_docs, len(docs_i)) top_docs.append(docs_i) top_doc_scores.append(scors_i) # Pad with empty docs for i in range(len(top_docs)): n_empty = max_n_docs - len(top_docs[i]) if n_empty: top_docs[i] = top_docs[i] + [BLANK_DOC] * n_empty top_doc_scores[i] = top_doc_scores[i] + [0] * n_empty self.top_docs = top_docs return top_docs, torch.Tensor(top_doc_scores).to(query.device) class SearchQueryFAISSIndexRetriever(SearchQueryRetriever, DPRRetriever): def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared): SearchQueryRetriever.__init__(self, opt, dictionary, shared=shared) self.load_index(opt, shared) def share(self) -> TShared: shared = SearchQueryRetriever.share(self) shared.update(DPRRetriever.share(self)) return shared def retrieve_and_score( self, query: torch.LongTensor ) -> Tuple[List[List[Document]], torch.Tensor]: 
""" Retrieves from the FAISS index using a search query. This methods relies on the `retrieve_and_score` method in `RagRetriever` ancestor class. It receive the query (conversation context) and generatess the search term queries based on them. Then uses those search quries (instead of the the query text itself) to retrieve from the FAISS index. """ search_queries = self.generate_search_query(query) tokenized_search_queries, _ = padded_tensor( [self._tokenizer.encode(sq) for sq in search_queries] ) top_docs, top_doc_scores = DPRRetriever.retrieve_and_score( self, tokenized_search_queries.to(query.device) ) for query_id in range(len(top_docs)): if search_queries[query_id] == NO_SEARCH_QUERY: top_docs[query_id] = [BLANK_DOC for _ in range(self.n_docs)] return top_docs, top_doc_scores class ObservationEchoRetriever(RagRetriever): """ This retriever returns (echos) documents that are already passed to it to return. Use this only with GoldFiD agents. It relies on the retrieved docs being included in the observed example of the agent. 
""" def __init__(self, opt: Opt, dictionary: DictionaryAgent, shared: TShared = None): self._delimiter = '\n' self.n_docs = opt['n_docs'] self._query_ids = dict() self._saved_docs = dict() self._largest_seen_idx = -1 super().__init__(opt, dictionary, shared=shared) def add_retrieve_doc(self, query: str, retrieved_docs: List[Document]): self._largest_seen_idx += 1 new_idx = self._largest_seen_idx if new_idx in self._query_ids.values() or new_idx in self._saved_docs: raise RuntimeError( "Nonunique new_idx created in add_retrieve_doc in ObservationEchoRetriever \n" "this might return the same set of docs for two distinct queries" ) self._query_ids[query] = new_idx self._saved_docs[new_idx] = retrieved_docs or [ BLANK_DOC for _ in range(self.n_docs) ] def tokenize_query(self, query: str) -> List[int]: return [self._query_ids[query]] def get_delimiter(self) -> str: return self._delimiter def _clear_mapping(self): self._query_ids = dict() self._saved_docs = dict() self._largest_seen_idx = -1 def retrieve_and_score( self, query: torch.LongTensor ) -> Tuple[List[List[Document]], torch.Tensor]: batch_size = query.size(0) retrieved_docs = [] for endoded_query in query.tolist(): docs_retrieve_idx = endoded_query[0] retrieved_docs.append(self._saved_docs[docs_retrieve_idx]) # Some arbitrary scoring of docs max_num_docs = max([len(rtds) for rtds in retrieved_docs]) retrieved_doc_scores = torch.Tensor([1 / (1 + i) for i in range(max_num_docs)]) retrieved_doc_scores = retrieved_doc_scores.repeat(batch_size, 1).to( query.device ) # empty the 2 mappings after each retrieval self._clear_mapping() return retrieved_docs, retrieved_doc_scores class DocumentChunkRanker: """ Base class for controlling splitting long documents and selecting relevant chunks. 
""" def __init__(self, n_retrieved_chunks): self.n_ret_chunks = n_retrieved_chunks @abstractmethod def get_top_chunks( self, query: str, doc_title: str, doc_chunks: Union[List[str], str], doc_url: str, ): """ Ranks documents (chunk) based on their relevance to `query` """ class HeadChunkRanker(DocumentChunkRanker): """ Returns the head chunks only. """ def get_top_chunks( self, query: str, doc_title: str, doc_chunks: Union[List[str], str], doc_url: str, ): """ Return chunks in doc-present order. """ return [(c,) for c in doc_chunks[: self.n_ret_chunks]] class RetrievedChunkRanker(DocumentChunkRanker): """ Utilize retrieved doc chunk mutator. """ def __init__(self, n_retrieved_chunks, chunk_size: int = 500): super().__init__(n_retrieved_chunks) self.chunk_size = chunk_size def get_top_chunks( self, query: str, doc_title: str, doc_chunks: Union[List[str], str], doc_url: str, ): """ Return chunks according to the woi_chunk_retrieved_docs_mutator """ if isinstance(doc_chunks, list): docs = ''.join(doc_chunks) else: assert isinstance(doc_chunks, str) docs = doc_chunks chunks = chunk_docs_in_message( Message( { CONST.RETRIEVED_DOCS: [docs], CONST.RETRIEVED_DOCS_TITLES: [doc_title], CONST.RETRIEVED_DOCS_URLS: [doc_url], CONST.SELECTED_SENTENCES: [CONST.NO_SELECTED_SENTENCES_TOKEN], } ), self.chunk_size, )[CONST.RETRIEVED_DOCS] return [(c,) for c in chunks[: self.n_ret_chunks]] class TfidfChunkRanker(DocumentChunkRanker): """ Uses TF-IDF to compare chunks to the original search query. 
""" def __init__(self, n_retrieved_chunks): super().__init__(n_retrieved_chunks) self._vectorizer = TfidfVectorizer() def get_top_chunks( self, query: str, doc_title: str, doc_chunks: Union[List[str], str], doc_url: str, ): assert isinstance(doc_chunks, list) vectorized_corpus = self._vectorizer.fit_transform(doc_chunks + [query]) docs_vec = vectorized_corpus[:-1, :] q_vec = vectorized_corpus[-1, :] scores = np.hstack((q_vec * docs_vec.transpose()).toarray()) top_chunk_ids = np.argsort(-scores)[: self.n_ret_chunks] return [(doc_chunks[i], scores[i]) for i in top_chunk_ids] def retriever_factory( opt: Opt, dictionary: DictionaryAgent, shared=None ) -> Optional[RagRetriever]: """ Build retriever. :param opt: ParlAI Opt :param dictionary: dictionary agent :param shared: shared objects. :return retriever: return a retriever for RAG. """ if opt.get('converting'): return None # only build retriever when not converting a BART model retriever = RetrieverType(opt['rag_retriever_type']) if retriever is RetrieverType.DPR: return DPRRetriever(opt, dictionary, shared=shared) elif retriever is RetrieverType.TFIDF: return TFIDFRetriever(opt, dictionary, shared=shared) elif retriever is RetrieverType.DPR_THEN_POLY: return DPRThenPolyRetriever(opt, dictionary, shared=shared) elif retriever is RetrieverType.POLY_FAISS: return PolyFaissRetriever(opt, dictionary, shared=shared) elif retriever is RetrieverType.SEARCH_ENGINE: return SearchQuerySearchEngineRetriever(opt, dictionary, shared=shared) elif retriever is RetrieverType.SEARCH_TERM_FAISS: return SearchQueryFAISSIndexRetriever(opt, dictionary, shared=shared) elif retriever is RetrieverType.OBSERVATION_ECHO_RETRIEVER: return ObservationEchoRetriever(opt, dictionary, shared=shared)
# Face tracker entry script: parse CLI options, then (further below) read video
# frames, run the tracker, and stream results over UDP.
import copy
import os
import sys
import argparse
import traceback
import gc

parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Network target for the tracking data packets.
parser.add_argument("-i", "--ip", help="Set IP address for sending tracking data", default="127.0.0.1")
parser.add_argument("-p", "--port", type=int, help="Set port for sending tracking data", default=11573)
# Windows gets extra camera handling via DirectShow capture.
if os.name == 'nt':
    parser.add_argument("-l", "--list-cameras", type=int, help="Set this to 1 to list the available cameras and quit, set this to 2 or higher to output only the names", default=0)
    parser.add_argument("-a", "--list-dcaps", type=int, help="Set this to -1 to list all cameras and their available capabilities, set this to a camera id to list that camera's capabilities", default=None)
    parser.add_argument("-W", "--width", type=int, help="Set camera and raw RGB width", default=640)
    parser.add_argument("-H", "--height", type=int, help="Set camera and raw RGB height", default=360)
    parser.add_argument("-F", "--fps", type=int, help="Set camera frames per second", default=24)
    parser.add_argument("-D", "--dcap", type=int, help="Set which device capability line to use or -1 to use the default camera settings", default=None)
    parser.add_argument("-B", "--blackmagic", type=int, help="When set to 1, special support for Blackmagic devices is enabled", default=0)
else:
    parser.add_argument("-W", "--width", type=int, help="Set raw RGB width", default=640)
    parser.add_argument("-H", "--height", type=int, help="Set raw RGB height", default=360)
parser.add_argument("-c", "--capture", help="Set camera ID (0, 1...) or video file", default="0")
parser.add_argument("-M", "--mirror-input", action="store_true", help="Process a mirror image of the input video")
parser.add_argument("-m", "--max-threads", type=int, help="Set the maximum number of threads", default=1)
parser.add_argument("-t", "--threshold", type=float, help="Set minimum confidence threshold for face tracking", default=None)
parser.add_argument("-d", "--detection-threshold", type=float, help="Set minimum confidence threshold for face detection", default=0.6)
parser.add_argument("-v", "--visualize", type=int, help="Set this to 1 to visualize the tracking, to 2 to also show face ids, to 3 to add confidence values or to 4 to add numbers to the point display", default=0)
parser.add_argument("-P", "--pnp-points", type=int, help="Set this to 1 to add the 3D fitting points to the visualization", default=0)
parser.add_argument("-s", "--silent", type=int, help="Set this to 1 to prevent text output on the console", default=0)
parser.add_argument("--faces", type=int, help="Set the maximum number of faces (slow)", default=1)
parser.add_argument("--scan-retinaface", type=int, help="When set to 1, scanning for additional faces will be performed using RetinaFace in a background thread, otherwise a simpler, faster face detection mechanism is used. When the maximum number of faces is 1, this option does nothing.", default=0)
parser.add_argument("--scan-every", type=int, help="Set after how many frames a scan for new faces should run", default=3)
parser.add_argument("--discard-after", type=int, help="Set the how long the tracker should keep looking for lost faces", default=10)
parser.add_argument("--max-feature-updates", type=int, help="This is the number of seconds after which feature min/max/medium values will no longer be updated once a face has been detected.", default=900)
parser.add_argument("--no-3d-adapt", type=int, help="When set to 1, the 3D face model will not be adapted to increase the fit", default=1)
parser.add_argument("--try-hard", type=int, help="When set to 1, the tracker will try harder to find a face", default=0)
parser.add_argument("--video-out", help="Set this to the filename of an AVI file to save the tracking visualization as a video", default=None)
parser.add_argument("--video-scale", type=int, help="This is a resolution scale factor applied to the saved AVI file", default=1, choices=[1,2,3,4])
parser.add_argument("--video-fps", type=float, help="This sets the frame rate of the output AVI file", default=24)
parser.add_argument("--raw-rgb", type=int, help="When this is set, raw RGB frames of the size given with \"-W\" and \"-H\" are read from standard input instead of reading a video", default=0)
parser.add_argument("--log-data", help="You can set a filename to which tracking data will be logged here", default="")
parser.add_argument("--log-output", help="You can set a filename to console output will be logged here", default="")
parser.add_argument("--model", type=int, help="This can be used to select the tracking model. Higher numbers are models with better tracking quality, but slower speed, except for model 4, which is wink optimized. Models 1 and 0 tend to be too rigid for expression and blink detection. Model -2 is roughly equivalent to model 1, but faster. Model -3 is between models 0 and -1.", default=3, choices=[-3, -2, -1, 0, 1, 2, 3, 4])
parser.add_argument("--model-dir", help="This can be used to specify the path to the directory containing the .onnx model files", default=None)
parser.add_argument("--gaze-tracking", type=int, help="When set to 1, experimental blink detection and gaze tracking are enabled, which makes things slightly slower", default=1)
parser.add_argument("--face-id-offset", type=int, help="When set, this offset is added to all face ids, which can be useful for mixing tracking data from multiple network sources", default=0)
parser.add_argument("--repeat-video", type=int, help="When set to 1 and a video file was specified with -c, the tracker will loop the video until interrupted", default=0)
parser.add_argument("--dump-points", type=str, help="When set to a filename, the current face 3D points are made symmetric and dumped to the given file when quitting the visualization with the \"q\" key", default="")
parser.add_argument("--benchmark", type=int, help="When set to 1, the different tracking models are benchmarked, starting with the best and ending with the fastest and with gaze tracking disabled for models with negative IDs", default=0)
if os.name == 'nt':
    parser.add_argument("--use-dshowcapture", type=int, help="When set to 1, libdshowcapture will be used for video input instead of OpenCV", default=1)
    parser.add_argument("--blackmagic-options", type=str, help="When set, this additional option string is passed to the blackmagic capture library", default=None)
    parser.add_argument("--priority", type=int, help="When set, the process priority will be changed", default=None, choices=[0, 1, 2, 3, 4, 5])
args = parser.parse_args()

# Limit OpenMP threads before any numeric libraries are imported.
os.environ["OMP_NUM_THREADS"] = str(args.max_threads)

class OutputLog(object):
    # Tee-style stream wrapper: mirrors writes to an optional log file and the
    # original stream, flushing after every write.
    def __init__(self, fh, output):
        self.fh = fh  # log file handle; may be None
        self.output = output  # original stream (stdout or stderr)
    def write(self, buf):
        if not self.fh is None:
            self.fh.write(buf)
        self.output.write(buf)
        self.flush()
    def flush(self):
        if not
self.fh is None: self.fh.flush() self.output.flush() output_logfile = None if args.log_output != "": output_logfile = open(args.log_output, "w") sys.stdout = OutputLog(output_logfile, sys.stdout) sys.stderr = OutputLog(output_logfile, sys.stderr) if os.name == 'nt': import dshowcapture if args.blackmagic == 1: dshowcapture.set_bm_enabled(True) if not args.blackmagic_options is None: dshowcapture.set_options(args.blackmagic_options) if not args.priority is None: import psutil classes = [psutil.IDLE_PRIORITY_CLASS, psutil.BELOW_NORMAL_PRIORITY_CLASS, psutil.NORMAL_PRIORITY_CLASS, psutil.ABOVE_NORMAL_PRIORITY_CLASS, psutil.HIGH_PRIORITY_CLASS, psutil.REALTIME_PRIORITY_CLASS] p = psutil.Process(os.getpid()) p.nice(classes[args.priority]) if os.name == 'nt' and (args.list_cameras > 0 or not args.list_dcaps is None): cap = dshowcapture.DShowCapture() info = cap.get_info() unit = 10000000.; if not args.list_dcaps is None: formats = {0: "Any", 1: "Unknown", 100: "ARGB", 101: "XRGB", 200: "I420", 201: "NV12", 202: "YV12", 203: "Y800", 300: "YVYU", 301: "YUY2", 302: "UYVY", 303: "HDYC (Unsupported)", 400: "MJPEG", 401: "H264" } for cam in info: if args.list_dcaps == -1: type = "" if cam['type'] == "Blackmagic": type = "Blackmagic: " print(f"{cam["index"]}: {type}{cam["name"]}") if args.list_dcaps != -1 and args.list_dcaps != cam['index']: continue for caps in cam['caps']: format = caps['format'] if caps['format'] in formats: format = formats[caps['format']] if caps['minCX'] == caps['maxCX'] and caps['minCY'] == caps['maxCY']: print(f" {caps["id"]}: Resolution: {caps["minCX"]}x{caps["minCY"]} FPS: {unit/caps["maxInterval"]:.3f}-{unit/caps["minInterval"]:.3f} Format: {format}") else: print(f" {caps["id"]}: Resolution: {caps["minCX"]}x{caps["minCY"]}-{caps["maxCX"]}x{caps["maxCY"]} FPS: {unit/caps["maxInterval"]:.3f}-{unit/caps["minInterval"]:.3f} Format: {format}") else: if args.list_cameras == 1: print("Available cameras:") for cam in info: type = "" if cam['type'] == 
"Blackmagic": type = "Blackmagic: " if args.list_cameras == 1: print(f"{cam["index"]}: {type}{cam["name"]}") else: print(f"{type}{cam["name"]}") cap.destroy_capture() sys.exit(0) import numpy as np import time import cv2 import socket import struct import json from input_reader import InputReader, VideoReader, DShowCaptureReader, try_int from tracker import Tracker, get_model_base_path if args.benchmark > 0: model_base_path = get_model_base_path(args.model_dir) im = cv2.imread(os.path.join(model_base_path, "benchmark.bin"), cv2.IMREAD_COLOR) results = [] for model_type in [3, 2, 1, 0, -1, -2, -3]: tracker = Tracker(224, 224, threshold=0.1, max_threads=args.max_threads, max_faces=1, discard_after=0, scan_every=0, silent=True, model_type=model_type, model_dir=args.model_dir, no_gaze=(model_type == -1), detection_threshold=0.1, use_retinaface=0, max_feature_updates=900, static_model=True if args.no_3d_adapt == 1 else False) tracker.detected = 1 tracker.faces = [(0, 0, 224, 224)] total = 0.0 for i in range(100): start = time.perf_counter() r = tracker.predict(im) total += time.perf_counter() - start print(1. 
/ (total / 100.)) sys.exit(0) target_ip = args.ip target_port = args.port if args.faces >= 40: print("Transmission of tracking data over network is not supported with 40 or more faces.") fps = 0 dcap = None use_dshowcapture_flag = False if os.name == 'nt': fps = args.fps dcap = args.dcap use_dshowcapture_flag = True if args.use_dshowcapture == 1 else False input_reader = InputReader(args.capture, args.raw_rgb, args.width, args.height, fps, use_dshowcapture=use_dshowcapture_flag, dcap=dcap) if args.dcap == -1 and type(input_reader) == DShowCaptureReader: fps = min(fps, input_reader.device.get_fps()) else: input_reader = InputReader(args.capture, args.raw_rgb, args.width, args.height, fps, use_dshowcapture=use_dshowcapture_flag) if type(input_reader.reader) == VideoReader: fps = 0 log = None out = None first = True height = 0 width = 0 tracker = None sock = None total_tracking_time = 0.0 tracking_time = 0.0 tracking_frames = 0 frame_count = 0 features = ["eye_l", "eye_r", "eyebrow_steepness_l", "eyebrow_updown_l", "eyebrow_quirk_l", "eyebrow_steepness_r", "eyebrow_updown_r", "eyebrow_quirk_r", "mouth_corner_updown_l", "mouth_corner_inout_l", "mouth_corner_updown_r", "mouth_corner_inout_r", "mouth_open", "mouth_wide"] if args.log_data != "": log = open(args.log_data, "w") log.write("Frame,Time,Width,Height,FPS,Face,FaceID,RightOpen,LeftOpen,AverageConfidence,Success3D,PnPError,RotationQuat.X,RotationQuat.Y,RotationQuat.Z,RotationQuat.W,Euler.X,Euler.Y,Euler.Z,RVec.X,RVec.Y,RVec.Z,TVec.X,TVec.Y,TVec.Z") for i in range(66): log.write(f",Landmark[{i}].X,Landmark[{i}].Y,Landmark[{i}].Confidence") for i in range(66): log.write(f",Point3D[{i}].X,Point3D[{i}].Y,Point3D[{i}].Z") for feature in features: log.write(f",{feature}") log.write("\r\n") log.flush() is_camera = args.capture == str(try_int(args.capture)) try: attempt = 0 frame_time = time.perf_counter() target_duration = 0 if fps > 0: target_duration = 1. 
/ float(fps) repeat = args.repeat_video != 0 and type(input_reader.reader) == VideoReader need_reinit = 0 failures = 0 source_name = input_reader.name while repeat or input_reader.is_open(): if not input_reader.is_open() or need_reinit == 1: input_reader = InputReader(args.capture, args.raw_rgb, args.width, args.height, fps, use_dshowcapture=use_dshowcapture_flag, dcap=dcap) if input_reader.name != source_name: print(f"Failed to reinitialize camera and got {input_reader.name} instead of {source_name}.") sys.exit(1) need_reinit = 2 time.sleep(0.02) continue if not input_reader.is_ready(): time.sleep(0.02) continue ret, frame = input_reader.read() if ret and args.mirror_input: frame = cv2.flip(frame, 1) if not ret: if repeat: if need_reinit == 0: need_reinit = 1 continue elif is_camera: attempt += 1 if attempt > 30: break else: time.sleep(0.02) if attempt == 3: need_reinit = 1 continue else: break; attempt = 0 need_reinit = 0 frame_count += 1 now = time.time() if first: first = False height, width, channels = frame.shape sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) tracker = Tracker(width, height, threshold=args.threshold, max_threads=args.max_threads, max_faces=args.faces, discard_after=args.discard_after, scan_every=args.scan_every, silent=False if args.silent == 0 else True, model_type=args.model, model_dir=args.model_dir, no_gaze=False if args.gaze_tracking != 0 and args.model != -1 else True, detection_threshold=args.detection_threshold, use_retinaface=args.scan_retinaface, max_feature_updates=args.max_feature_updates, static_model=True if args.no_3d_adapt == 1 else False, try_hard=args.try_hard == 1) if not args.video_out is None: out = cv2.VideoWriter(args.video_out, cv2.VideoWriter_fourcc('F','F','V','1'), args.video_fps, (width * args.video_scale, height * args.video_scale)) try: inference_start = time.perf_counter() faces = tracker.predict(frame) if len(faces) > 0: inference_time = (time.perf_counter() - inference_start) total_tracking_time += 
inference_time tracking_time += inference_time / len(faces) tracking_frames += 1 packet = bytearray() detected = False for face_num, f in enumerate(faces): f = copy.copy(f) f.id += args.face_id_offset if f.eye_blink is None: f.eye_blink = [1, 1] right_state = "O" if f.eye_blink[0] > 0.30 else "-" left_state = "O" if f.eye_blink[1] > 0.30 else "-" if args.silent == 0: print(f"Confidence[{f.id}]: {f.conf:.4f} / 3D fitting error: {f.pnp_error:.4f} / Eyes: {left_state}, {right_state}") detected = True if not f.success: pts_3d = np.zeros((70, 3), np.float32) packet.extend(bytearray(struct.pack("d", now))) packet.extend(bytearray(struct.pack("i", f.id))) packet.extend(bytearray(struct.pack("f", width))) packet.extend(bytearray(struct.pack("f", height))) packet.extend(bytearray(struct.pack("f", f.eye_blink[0]))) packet.extend(bytearray(struct.pack("f", f.eye_blink[1]))) packet.extend(bytearray(struct.pack("B", 1 if f.success else 0))) packet.extend(bytearray(struct.pack("f", f.pnp_error))) packet.extend(bytearray(struct.pack("f", f.quaternion[0]))) packet.extend(bytearray(struct.pack("f", f.quaternion[1]))) packet.extend(bytearray(struct.pack("f", f.quaternion[2]))) packet.extend(bytearray(struct.pack("f", f.quaternion[3]))) packet.extend(bytearray(struct.pack("f", f.euler[0]))) packet.extend(bytearray(struct.pack("f", f.euler[1]))) packet.extend(bytearray(struct.pack("f", f.euler[2]))) packet.extend(bytearray(struct.pack("f", f.translation[0]))) packet.extend(bytearray(struct.pack("f", f.translation[1]))) packet.extend(bytearray(struct.pack("f", f.translation[2]))) if not log is None: log.write(f"{frame_count},{now},{width},{height},{args.fps},{face_num},{f.id},{f.eye_blink[0]},{f.eye_blink[1]},{f.conf},{f.success},{f.pnp_error},{f.quaternion[0]},{f.quaternion[1]},{f.quaternion[2]},{f.quaternion[3]},{f.euler[0]},{f.euler[1]},{f.euler[2]},{f.rotation[0]},{f.rotation[1]},{f.rotation[2]},{f.translation[0]},{f.translation[1]},{f.translation[2]}") for (x,y,c) in f.lms: 
packet.extend(bytearray(struct.pack("f", c))) if args.visualize > 1: frame = cv2.putText(frame, str(f.id), (int(f.bbox[0]), int(f.bbox[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255,0,255)) if args.visualize > 2: frame = cv2.putText(frame, f"{f.conf:.4f}", (int(f.bbox[0] + 18), int(f.bbox[1] - 6)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255)) for pt_num, (x,y,c) in enumerate(f.lms): packet.extend(bytearray(struct.pack("f", y))) packet.extend(bytearray(struct.pack("f", x))) if not log is None: log.write(f",{y},{x},{c}") if pt_num == 66 and (f.eye_blink[0] < 0.30 or c < 0.30): continue if pt_num == 67 and (f.eye_blink[1] < 0.30 or c < 0.30): continue x = int(x + 0.5) y = int(y + 0.5) if args.visualize != 0 or not out is None: if args.visualize > 3: frame = cv2.putText(frame, str(pt_num), (int(y), int(x)), cv2.FONT_HERSHEY_SIMPLEX, 0.25, (255,255,0)) color = (0, 255, 0) if pt_num >= 66: color = (255, 255, 0) if not (x < 0 or y < 0 or x >= height or y >= width): frame[int(x), int(y)] = color x += 1 if not (x < 0 or y < 0 or x >= height or y >= width): frame[int(x), int(y)] = color y += 1 if not (x < 0 or y < 0 or x >= height or y >= width): frame[int(x), int(y)] = color x -= 1 if not (x < 0 or y < 0 or x >= height or y >= width): frame[int(x), int(y)] = color if args.pnp_points != 0 and (args.visualize != 0 or not out is None) and f.rotation is not None: if args.pnp_points > 1: projected = cv2.projectPoints(f.face_3d[0:66], f.rotation, f.translation, tracker.camera, tracker.dist_coeffs) else: projected = cv2.projectPoints(f.contour, f.rotation, f.translation, tracker.camera, tracker.dist_coeffs) for [(x,y)] in projected[0]: x = int(x + 0.5) y = int(y + 0.5) if not (x < 0 or y < 0 or x >= height or y >= width): frame[int(x), int(y)] = (0, 255, 255) x += 1 if not (x < 0 or y < 0 or x >= height or y >= width): frame[int(x), int(y)] = (0, 255, 255) y += 1 if not (x < 0 or y < 0 or x >= height or y >= width): frame[int(x), int(y)] = (0, 255, 255) x -= 1 if not (x < 0 or y < 0 or x 
>= height or y >= width): frame[int(x), int(y)] = (0, 255, 255) for (x,y,z) in f.pts_3d: packet.extend(bytearray(struct.pack("f", x))) packet.extend(bytearray(struct.pack("f", -y))) packet.extend(bytearray(struct.pack("f", -z))) if not log is None: log.write(f",{x},{-y},{-z}") if f.current_features is None: f.current_features = {} for feature in features: if not feature in f.current_features: f.current_features[feature] = 0 packet.extend(bytearray(struct.pack("f", f.current_features[feature]))) if not log is None: log.write(f",{f.current_features[feature]}") if not log is None: log.write("\r\n") log.flush() if detected and len(faces) < 40: sock.sendto(packet, (target_ip, target_port)) if not out is None: video_frame = frame if args.video_scale != 1: video_frame = cv2.resize(frame, (width * args.video_scale, height * args.video_scale), interpolation=cv2.INTER_NEAREST) out.write(video_frame) if args.video_scale != 1: del video_frame if args.visualize != 0: cv2.imshow('OpenSeeFace Visualization', frame) if cv2.waitKey(1) & 0xFF == ord('q'): if args.dump_points != "" and not faces is None and len(faces) > 0: np.set_printoptions(threshold=sys.maxsize, precision=15) pairs = [ (0, 16), (1, 15), (2, 14), (3, 13), (4, 12), (5, 11), (6, 10), (7, 9), (17, 26), (18, 25), (19, 24), (20, 23), (21, 22), (31, 35), (32, 34), (36, 45), (37, 44), (38, 43), (39, 42), (40, 47), (41, 46), (48, 52), (49, 51), (56, 54), (57, 53), (58, 62), (59, 61), (65, 63) ] points = copy.copy(faces[0].face_3d) for a, b in pairs: x = (points[a, 0] - points[b, 0]) / 2.0 y = (points[a, 1] + points[b, 1]) / 2.0 z = (points[a, 2] + points[b, 2]) / 2.0 points[a, 0] = x points[b, 0] = -x points[[a, b], 1] = y points[[a, b], 2] = z points[[8, 27, 28, 29, 33, 50, 55, 60, 64], 0] = 0.0 points[30, :] = 0.0 with open(args.dump_points, "w") as fh: fh.write(repr(points)) break failures = 0 except Exception as e: if e.__class__ == KeyboardInterrupt: if args.silent == 0: print("Quitting") break traceback.print_exc() 
failures += 1 if failures > 30: break collected = False del frame duration = time.perf_counter() - frame_time while duration < target_duration: if not collected: gc.collect() collected = True duration = time.perf_counter() - frame_time sleep_time = target_duration - duration if sleep_time > 0: time.sleep(sleep_time) duration = time.perf_counter() - frame_time frame_time = time.perf_counter() except KeyboardInterrupt: if args.silent == 0: print("Quitting") input_reader.close() if not out is None: out.release() cv2.destroyAllWindows() if args.silent == 0 and tracking_frames > 0: average_tracking_time = 1000 * tracking_time / tracking_frames print(f"Average tracking time per detected face: {average_tracking_time:.2f} ms") print(f"Tracking time: {total_tracking_time:.3f} s\nFrames: {tracking_frames}")
# Face-tracker entry-point script: parses CLI options, optionally lists cameras or
# benchmarks models, then runs the main capture/track/send loop, streaming tracking
# data over UDP and optionally logging CSV rows and/or writing a visualization video.
import copy
import os
import sys
import argparse
import traceback
import gc

parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-i", "--ip", help="Set IP address for sending tracking data", default="127.0.0.1")
parser.add_argument("-p", "--port", type=int, help="Set port for sending tracking data", default=11573)
if os.name == 'nt':
    # Windows-only options: DirectShow capture exposes extra camera controls.
    parser.add_argument("-l", "--list-cameras", type=int, help="Set this to 1 to list the available cameras and quit, set this to 2 or higher to output only the names", default=0)
    parser.add_argument("-a", "--list-dcaps", type=int, help="Set this to -1 to list all cameras and their available capabilities, set this to a camera id to list that camera's capabilities", default=None)
    parser.add_argument("-W", "--width", type=int, help="Set camera and raw RGB width", default=640)
    parser.add_argument("-H", "--height", type=int, help="Set camera and raw RGB height", default=360)
    parser.add_argument("-F", "--fps", type=int, help="Set camera frames per second", default=24)
    parser.add_argument("-D", "--dcap", type=int, help="Set which device capability line to use or -1 to use the default camera settings", default=None)
    parser.add_argument("-B", "--blackmagic", type=int, help="When set to 1, special support for Blackmagic devices is enabled", default=0)
else:
    parser.add_argument("-W", "--width", type=int, help="Set raw RGB width", default=640)
    parser.add_argument("-H", "--height", type=int, help="Set raw RGB height", default=360)
parser.add_argument("-c", "--capture", help="Set camera ID (0, 1...) or video file", default="0")
parser.add_argument("-M", "--mirror-input", action="store_true", help="Process a mirror image of the input video")
parser.add_argument("-m", "--max-threads", type=int, help="Set the maximum number of threads", default=1)
parser.add_argument("-t", "--threshold", type=float, help="Set minimum confidence threshold for face tracking", default=None)
parser.add_argument("-d", "--detection-threshold", type=float, help="Set minimum confidence threshold for face detection", default=0.6)
parser.add_argument("-v", "--visualize", type=int, help="Set this to 1 to visualize the tracking, to 2 to also show face ids, to 3 to add confidence values or to 4 to add numbers to the point display", default=0)
parser.add_argument("-P", "--pnp-points", type=int, help="Set this to 1 to add the 3D fitting points to the visualization", default=0)
parser.add_argument("-s", "--silent", type=int, help="Set this to 1 to prevent text output on the console", default=0)
parser.add_argument("--faces", type=int, help="Set the maximum number of faces (slow)", default=1)
parser.add_argument("--scan-retinaface", type=int, help="When set to 1, scanning for additional faces will be performed using RetinaFace in a background thread, otherwise a simpler, faster face detection mechanism is used. When the maximum number of faces is 1, this option does nothing.", default=0)
parser.add_argument("--scan-every", type=int, help="Set after how many frames a scan for new faces should run", default=3)
parser.add_argument("--discard-after", type=int, help="Set the how long the tracker should keep looking for lost faces", default=10)
parser.add_argument("--max-feature-updates", type=int, help="This is the number of seconds after which feature min/max/medium values will no longer be updated once a face has been detected.", default=900)
parser.add_argument("--no-3d-adapt", type=int, help="When set to 1, the 3D face model will not be adapted to increase the fit", default=1)
parser.add_argument("--try-hard", type=int, help="When set to 1, the tracker will try harder to find a face", default=0)
parser.add_argument("--video-out", help="Set this to the filename of an AVI file to save the tracking visualization as a video", default=None)
parser.add_argument("--video-scale", type=int, help="This is a resolution scale factor applied to the saved AVI file", default=1, choices=[1,2,3,4])
parser.add_argument("--video-fps", type=float, help="This sets the frame rate of the output AVI file", default=24)
parser.add_argument("--raw-rgb", type=int, help="When this is set, raw RGB frames of the size given with \"-W\" and \"-H\" are read from standard input instead of reading a video", default=0)
parser.add_argument("--log-data", help="You can set a filename to which tracking data will be logged here", default="")
parser.add_argument("--log-output", help="You can set a filename to console output will be logged here", default="")
parser.add_argument("--model", type=int, help="This can be used to select the tracking model. Higher numbers are models with better tracking quality, but slower speed, except for model 4, which is wink optimized. Models 1 and 0 tend to be too rigid for expression and blink detection. Model -2 is roughly equivalent to model 1, but faster. Model -3 is between models 0 and -1.", default=3, choices=[-3, -2, -1, 0, 1, 2, 3, 4])
parser.add_argument("--model-dir", help="This can be used to specify the path to the directory containing the .onnx model files", default=None)
parser.add_argument("--gaze-tracking", type=int, help="When set to 1, experimental blink detection and gaze tracking are enabled, which makes things slightly slower", default=1)
parser.add_argument("--face-id-offset", type=int, help="When set, this offset is added to all face ids, which can be useful for mixing tracking data from multiple network sources", default=0)
parser.add_argument("--repeat-video", type=int, help="When set to 1 and a video file was specified with -c, the tracker will loop the video until interrupted", default=0)
parser.add_argument("--dump-points", type=str, help="When set to a filename, the current face 3D points are made symmetric and dumped to the given file when quitting the visualization with the \"q\" key", default="")
parser.add_argument("--benchmark", type=int, help="When set to 1, the different tracking models are benchmarked, starting with the best and ending with the fastest and with gaze tracking disabled for models with negative IDs", default=0)
if os.name == 'nt':
    parser.add_argument("--use-dshowcapture", type=int, help="When set to 1, libdshowcapture will be used for video input instead of OpenCV", default=1)
    parser.add_argument("--blackmagic-options", type=str, help="When set, this additional option string is passed to the blackmagic capture library", default=None)
    parser.add_argument("--priority", type=int, help="When set, the process priority will be changed", default=None, choices=[0, 1, 2, 3, 4, 5])
args = parser.parse_args()

# Must be set before any library that reads it (e.g. ONNX runtime) is imported.
os.environ["OMP_NUM_THREADS"] = str(args.max_threads)

class OutputLog(object):
    """Tee-like stream wrapper: mirrors console output into an optional log file."""
    def __init__(self, fh, output):
        self.fh = fh          # optional log file handle (may be None)
        self.output = output  # the original stream (stdout/stderr)
    def write(self, buf):
        if not self.fh is None:
            self.fh.write(buf)
        self.output.write(buf)
        self.flush()
    def flush(self):
        if not self.fh is None:
            self.fh.flush()
        self.output.flush()

output_logfile = None
if args.log_output != "":
    output_logfile = open(args.log_output, "w")
# Replace stdout/stderr so all prints also land in the log file when configured.
sys.stdout = OutputLog(output_logfile, sys.stdout)
sys.stderr = OutputLog(output_logfile, sys.stderr)

if os.name == 'nt':
    import dshowcapture
    if args.blackmagic == 1:
        dshowcapture.set_bm_enabled(True)
    if not args.blackmagic_options is None:
        dshowcapture.set_options(args.blackmagic_options)
    if not args.priority is None:
        import psutil
        classes = [psutil.IDLE_PRIORITY_CLASS, psutil.BELOW_NORMAL_PRIORITY_CLASS, psutil.NORMAL_PRIORITY_CLASS, psutil.ABOVE_NORMAL_PRIORITY_CLASS, psutil.HIGH_PRIORITY_CLASS, psutil.REALTIME_PRIORITY_CLASS]
        p = psutil.Process(os.getpid())
        p.nice(classes[args.priority])

if os.name == 'nt' and (args.list_cameras > 0 or not args.list_dcaps is None):
    # Camera / capability listing mode: print the requested info and exit.
    cap = dshowcapture.DShowCapture()
    info = cap.get_info()
    unit = 10000000.  # DirectShow frame intervals are in 100 ns units
    if not args.list_dcaps is None:
        formats = {0: "Any", 1: "Unknown", 100: "ARGB", 101: "XRGB", 200: "I420", 201: "NV12", 202: "YV12", 203: "Y800", 300: "YVYU", 301: "YUY2", 302: "UYVY", 303: "HDYC (Unsupported)", 400: "MJPEG", 401: "H264" }
        for cam in info:
            if args.list_dcaps == -1:
                # Renamed from "type"/"format" to avoid shadowing the builtins.
                cam_type = ""
                if cam['type'] == "Blackmagic":
                    cam_type = "Blackmagic: "
                print(f"{cam['index']}: {cam_type}{cam['name']}")
            if args.list_dcaps != -1 and args.list_dcaps != cam['index']:
                continue
            for caps in cam['caps']:
                fmt_name = caps['format']
                if caps['format'] in formats:
                    fmt_name = formats[caps['format']]
                if caps['minCX'] == caps['maxCX'] and caps['minCY'] == caps['maxCY']:
                    print(f" {caps['id']}: Resolution: {caps['minCX']}x{caps['minCY']} FPS: {unit/caps['maxInterval']:.3f}-{unit/caps['minInterval']:.3f} Format: {fmt_name}")
                else:
                    print(f" {caps['id']}: Resolution: {caps['minCX']}x{caps['minCY']}-{caps['maxCX']}x{caps['maxCY']} FPS: {unit/caps['maxInterval']:.3f}-{unit/caps['minInterval']:.3f} Format: {fmt_name}")
    else:
        if args.list_cameras == 1:
            print("Available cameras:")
        for cam in info:
            cam_type = ""
            if cam['type'] == "Blackmagic":
                cam_type = "Blackmagic: "
            if args.list_cameras == 1:
                print(f"{cam['index']}: {cam_type}{cam['name']}")
            else:
                print(f"{cam_type}{cam['name']}")
    cap.destroy_capture()
    sys.exit(0)

# Heavy imports are deferred until after the quick listing/exit paths above.
import numpy as np
import time
import cv2
import socket
import struct
import json
from input_reader import InputReader, VideoReader, DShowCaptureReader, try_int
from tracker import Tracker, get_model_base_path

if args.benchmark > 0:
    # Benchmark mode: time 100 predictions per model on a fixed image, print FPS, exit.
    model_base_path = get_model_base_path(args.model_dir)
    im = cv2.imread(os.path.join(model_base_path, "benchmark.bin"), cv2.IMREAD_COLOR)
    for model_type in [3, 2, 1, 0, -1, -2, -3]:
        tracker = Tracker(224, 224, threshold=0.1, max_threads=args.max_threads, max_faces=1, discard_after=0, scan_every=0, silent=True, model_type=model_type, model_dir=args.model_dir, no_gaze=(model_type == -1), detection_threshold=0.1, use_retinaface=0, max_feature_updates=900, static_model=True if args.no_3d_adapt == 1 else False)
        tracker.detected = 1
        tracker.faces = [(0, 0, 224, 224)]
        total = 0.0
        for i in range(100):
            start = time.perf_counter()
            tracker.predict(im)
            total += time.perf_counter() - start
        print(1. / (total / 100.))
    sys.exit(0)

target_ip = args.ip
target_port = args.port

if args.faces >= 40:
    print("Transmission of tracking data over network is not supported with 40 or more faces.")

fps = 0
dcap = None
use_dshowcapture_flag = False
if os.name == 'nt':
    fps = args.fps
    dcap = args.dcap
    use_dshowcapture_flag = True if args.use_dshowcapture == 1 else False
    input_reader = InputReader(args.capture, args.raw_rgb, args.width, args.height, fps, use_dshowcapture=use_dshowcapture_flag, dcap=dcap)
    # NOTE(review): input_reader is an InputReader wrapper (its .reader is checked
    # below), so this type test looks like it may never match — confirm against
    # InputReader's implementation before changing it.
    if args.dcap == -1 and type(input_reader) == DShowCaptureReader:
        fps = min(fps, input_reader.device.get_fps())
else:
    input_reader = InputReader(args.capture, args.raw_rgb, args.width, args.height, fps, use_dshowcapture=use_dshowcapture_flag)
if type(input_reader.reader) == VideoReader:
    fps = 0  # video files are processed as fast as possible, not rate-limited

log = None
out = None
first = True
height = 0
width = 0
tracker = None
sock = None
total_tracking_time = 0.0
tracking_time = 0.0
tracking_frames = 0
frame_count = 0

# Per-face expression feature names, in the order they are packed/logged.
features = ["eye_l", "eye_r", "eyebrow_steepness_l", "eyebrow_updown_l", "eyebrow_quirk_l", "eyebrow_steepness_r", "eyebrow_updown_r", "eyebrow_quirk_r", "mouth_corner_updown_l", "mouth_corner_inout_l", "mouth_corner_updown_r", "mouth_corner_inout_r", "mouth_open", "mouth_wide"]

if args.log_data != "":
    # Write the CSV header for the per-face data log.
    log = open(args.log_data, "w")
    log.write("Frame,Time,Width,Height,FPS,Face,FaceID,RightOpen,LeftOpen,AverageConfidence,Success3D,PnPError,RotationQuat.X,RotationQuat.Y,RotationQuat.Z,RotationQuat.W,Euler.X,Euler.Y,Euler.Z,RVec.X,RVec.Y,RVec.Z,TVec.X,TVec.Y,TVec.Z")
    for i in range(66):
        log.write(f",Landmark[{i}].X,Landmark[{i}].Y,Landmark[{i}].Confidence")
    for i in range(66):
        log.write(f",Point3D[{i}].X,Point3D[{i}].Y,Point3D[{i}].Z")
    for feature in features:
        log.write(f",{feature}")
    log.write("\r\n")
    log.flush()

# Capture argument is a camera index iff it round-trips through try_int unchanged.
is_camera = args.capture == str(try_int(args.capture))

try:
    attempt = 0
    frame_time = time.perf_counter()
    target_duration = 0
    if fps > 0:
        target_duration = 1. / float(fps)
    repeat = args.repeat_video != 0 and type(input_reader.reader) == VideoReader
    need_reinit = 0
    failures = 0
    source_name = input_reader.name
    while repeat or input_reader.is_open():
        if not input_reader.is_open() or need_reinit == 1:
            # Reopen the source (video looping or camera recovery).
            input_reader = InputReader(args.capture, args.raw_rgb, args.width, args.height, fps, use_dshowcapture=use_dshowcapture_flag, dcap=dcap)
            if input_reader.name != source_name:
                print(f"Failed to reinitialize camera and got {input_reader.name} instead of {source_name}.")
                sys.exit(1)
            need_reinit = 2
            time.sleep(0.02)
            continue
        if not input_reader.is_ready():
            time.sleep(0.02)
            continue

        ret, frame = input_reader.read()

        if ret and args.mirror_input:
            frame = cv2.flip(frame, 1)

        if not ret:
            if repeat:
                if need_reinit == 0:
                    need_reinit = 1
                continue
            elif is_camera:
                # Retry a few times before reinitializing; give up after 30 failures.
                attempt += 1
                if attempt > 30:
                    break
                else:
                    time.sleep(0.02)
                    if attempt == 3:
                        need_reinit = 1
                    continue
            else:
                break

        attempt = 0
        need_reinit = 0
        frame_count += 1
        now = time.time()

        if first:
            # Lazy one-time setup, deferred until the first frame so the real
            # frame dimensions are known.
            first = False
            height, width, channels = frame.shape
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            tracker = Tracker(width, height, threshold=args.threshold, max_threads=args.max_threads, max_faces=args.faces, discard_after=args.discard_after, scan_every=args.scan_every, silent=False if args.silent == 0 else True, model_type=args.model, model_dir=args.model_dir, no_gaze=False if args.gaze_tracking != 0 and args.model != -1 else True, detection_threshold=args.detection_threshold, use_retinaface=args.scan_retinaface, max_feature_updates=args.max_feature_updates, static_model=True if args.no_3d_adapt == 1 else False, try_hard=args.try_hard == 1)
            if not args.video_out is None:
                out = cv2.VideoWriter(args.video_out, cv2.VideoWriter_fourcc('F','F','V','1'), args.video_fps, (width * args.video_scale, height * args.video_scale))

        try:
            inference_start = time.perf_counter()
            faces = tracker.predict(frame)
            if len(faces) > 0:
                inference_time = (time.perf_counter() - inference_start)
                total_tracking_time += inference_time
                tracking_time += inference_time / len(faces)
                tracking_frames += 1
            packet = bytearray()
            detected = False
            for face_num, f in enumerate(faces):
                # Work on a copy so the id offset doesn't mutate tracker state.
                f = copy.copy(f)
                f.id += args.face_id_offset
                if f.eye_blink is None:
                    f.eye_blink = [1, 1]
                right_state = "O" if f.eye_blink[0] > 0.30 else "-"
                left_state = "O" if f.eye_blink[1] > 0.30 else "-"
                if args.silent == 0:
                    print(f"Confidence[{f.id}]: {f.conf:.4f} / 3D fitting error: {f.pnp_error:.4f} / Eyes: {left_state}, {right_state}")
                detected = True
                if not f.success:
                    # BUG FIX: the original assigned to a dead local "pts_3d", so the
                    # zeroed points were never used; the intent is clearly to transmit
                    # zeros for failed 3D fits.
                    f.pts_3d = np.zeros((70, 3), np.float32)
                # UDP packet layout: timestamp, id, frame size, blink, success flag,
                # pnp error, quaternion, euler, translation, then landmarks below.
                packet.extend(bytearray(struct.pack("d", now)))
                packet.extend(bytearray(struct.pack("i", f.id)))
                packet.extend(bytearray(struct.pack("f", width)))
                packet.extend(bytearray(struct.pack("f", height)))
                packet.extend(bytearray(struct.pack("f", f.eye_blink[0])))
                packet.extend(bytearray(struct.pack("f", f.eye_blink[1])))
                packet.extend(bytearray(struct.pack("B", 1 if f.success else 0)))
                packet.extend(bytearray(struct.pack("f", f.pnp_error)))
                packet.extend(bytearray(struct.pack("f", f.quaternion[0])))
                packet.extend(bytearray(struct.pack("f", f.quaternion[1])))
                packet.extend(bytearray(struct.pack("f", f.quaternion[2])))
                packet.extend(bytearray(struct.pack("f", f.quaternion[3])))
                packet.extend(bytearray(struct.pack("f", f.euler[0])))
                packet.extend(bytearray(struct.pack("f", f.euler[1])))
                packet.extend(bytearray(struct.pack("f", f.euler[2])))
                packet.extend(bytearray(struct.pack("f", f.translation[0])))
                packet.extend(bytearray(struct.pack("f", f.translation[1])))
                packet.extend(bytearray(struct.pack("f", f.translation[2])))
                if not log is None:
                    log.write(f"{frame_count},{now},{width},{height},{args.fps},{face_num},{f.id},{f.eye_blink[0]},{f.eye_blink[1]},{f.conf},{f.success},{f.pnp_error},{f.quaternion[0]},{f.quaternion[1]},{f.quaternion[2]},{f.quaternion[3]},{f.euler[0]},{f.euler[1]},{f.euler[2]},{f.rotation[0]},{f.rotation[1]},{f.rotation[2]},{f.translation[0]},{f.translation[1]},{f.translation[2]}")
                # All landmark confidences are packed first, then (y, x) pairs below.
                for (x,y,c) in f.lms:
                    packet.extend(bytearray(struct.pack("f", c)))
                if args.visualize > 1:
                    frame = cv2.putText(frame, str(f.id), (int(f.bbox[0]), int(f.bbox[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255,0,255))
                if args.visualize > 2:
                    frame = cv2.putText(frame, f"{f.conf:.4f}", (int(f.bbox[0] + 18), int(f.bbox[1] - 6)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255))
                for pt_num, (x,y,c) in enumerate(f.lms):
                    packet.extend(bytearray(struct.pack("f", y)))
                    packet.extend(bytearray(struct.pack("f", x)))
                    if not log is None:
                        log.write(f",{y},{x},{c}")
                    # Points 66/67 are the gaze points; skip drawing them when the
                    # corresponding eye is closed or the confidence is too low.
                    if pt_num == 66 and (f.eye_blink[0] < 0.30 or c < 0.30):
                        continue
                    if pt_num == 67 and (f.eye_blink[1] < 0.30 or c < 0.30):
                        continue
                    x = int(x + 0.5)
                    y = int(y + 0.5)
                    if args.visualize != 0 or not out is None:
                        if args.visualize > 3:
                            frame = cv2.putText(frame, str(pt_num), (int(y), int(x)), cv2.FONT_HERSHEY_SIMPLEX, 0.25, (255,255,0))
                        color = (0, 255, 0)
                        if pt_num >= 66:
                            color = (255, 255, 0)
                        # Draw a small 4-pixel marker, clamping to the frame bounds.
                        if not (x < 0 or y < 0 or x >= height or y >= width):
                            frame[int(x), int(y)] = color
                        x += 1
                        if not (x < 0 or y < 0 or x >= height or y >= width):
                            frame[int(x), int(y)] = color
                        y += 1
                        if not (x < 0 or y < 0 or x >= height or y >= width):
                            frame[int(x), int(y)] = color
                        x -= 1
                        if not (x < 0 or y < 0 or x >= height or y >= width):
                            frame[int(x), int(y)] = color
                if args.pnp_points != 0 and (args.visualize != 0 or not out is None) and f.rotation is not None:
                    # Reproject the fitted 3D model points for visualization.
                    if args.pnp_points > 1:
                        projected = cv2.projectPoints(f.face_3d[0:66], f.rotation, f.translation, tracker.camera, tracker.dist_coeffs)
                    else:
                        projected = cv2.projectPoints(f.contour, f.rotation, f.translation, tracker.camera, tracker.dist_coeffs)
                    for [(x,y)] in projected[0]:
                        x = int(x + 0.5)
                        y = int(y + 0.5)
                        if not (x < 0 or y < 0 or x >= height or y >= width):
                            frame[int(x), int(y)] = (0, 255, 255)
                        x += 1
                        if not (x < 0 or y < 0 or x >= height or y >= width):
                            frame[int(x), int(y)] = (0, 255, 255)
                        y += 1
                        if not (x < 0 or y < 0 or x >= height or y >= width):
                            frame[int(x), int(y)] = (0, 255, 255)
                        x -= 1
                        if not (x < 0 or y < 0 or x >= height or y >= width):
                            frame[int(x), int(y)] = (0, 255, 255)
                # 3D points are sent with y and z negated.
                for (x,y,z) in f.pts_3d:
                    packet.extend(bytearray(struct.pack("f", x)))
                    packet.extend(bytearray(struct.pack("f", -y)))
                    packet.extend(bytearray(struct.pack("f", -z)))
                    if not log is None:
                        log.write(f",{x},{-y},{-z}")
                if f.current_features is None:
                    f.current_features = {}
                for feature in features:
                    if not feature in f.current_features:
                        f.current_features[feature] = 0
                    packet.extend(bytearray(struct.pack("f", f.current_features[feature])))
                    if not log is None:
                        log.write(f",{f.current_features[feature]}")
                if not log is None:
                    log.write("\r\n")
                    log.flush()
            if detected and len(faces) < 40:
                sock.sendto(packet, (target_ip, target_port))
            if not out is None:
                video_frame = frame
                if args.video_scale != 1:
                    video_frame = cv2.resize(frame, (width * args.video_scale, height * args.video_scale), interpolation=cv2.INTER_NEAREST)
                out.write(video_frame)
                if args.video_scale != 1:
                    del video_frame
            if args.visualize != 0:
                cv2.imshow('OpenSeeFace Visualization', frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    if args.dump_points != "" and not faces is None and len(faces) > 0:
                        # Symmetrize the fitted 3D model and dump it before quitting.
                        np.set_printoptions(threshold=sys.maxsize, precision=15)
                        pairs = [
                            (0, 16),
                            (1, 15),
                            (2, 14),
                            (3, 13),
                            (4, 12),
                            (5, 11),
                            (6, 10),
                            (7, 9),
                            (17, 26),
                            (18, 25),
                            (19, 24),
                            (20, 23),
                            (21, 22),
                            (31, 35),
                            (32, 34),
                            (36, 45),
                            (37, 44),
                            (38, 43),
                            (39, 42),
                            (40, 47),
                            (41, 46),
                            (48, 52),
                            (49, 51),
                            (56, 54),
                            (57, 53),
                            (58, 62),
                            (59, 61),
                            (65, 63)
                        ]
                        points = copy.copy(faces[0].face_3d)
                        for a, b in pairs:
                            # Mirror each left/right landmark pair across the x axis.
                            x = (points[a, 0] - points[b, 0]) / 2.0
                            y = (points[a, 1] + points[b, 1]) / 2.0
                            z = (points[a, 2] + points[b, 2]) / 2.0
                            points[a, 0] = x
                            points[b, 0] = -x
                            points[[a, b], 1] = y
                            points[[a, b], 2] = z
                        # Centerline points sit exactly on the symmetry plane.
                        points[[8, 27, 28, 29, 33, 50, 55, 60, 64], 0] = 0.0
                        points[30, :] = 0.0
                        with open(args.dump_points, "w") as fh:
                            fh.write(repr(points))
                    break
            failures = 0
        except Exception as e:
            if e.__class__ == KeyboardInterrupt:
                if args.silent == 0:
                    print("Quitting")
                break
            traceback.print_exc()
            failures += 1
            if failures > 30:
                break

        collected = False
        del frame

        # Frame pacing: garbage-collect once, then sleep until the target duration.
        duration = time.perf_counter() - frame_time
        while duration < target_duration:
            if not collected:
                gc.collect()
                collected = True
            duration = time.perf_counter() - frame_time
            sleep_time = target_duration - duration
            if sleep_time > 0:
                time.sleep(sleep_time)
            duration = time.perf_counter() - frame_time
        frame_time = time.perf_counter()
except KeyboardInterrupt:
    if args.silent == 0:
        print("Quitting")

input_reader.close()
if not out is None:
    out.release()
cv2.destroyAllWindows()

if args.silent == 0 and tracking_frames > 0:
    average_tracking_time = 1000 * tracking_time / tracking_frames
    print(f"Average tracking time per detected face: {average_tracking_time:.2f} ms")
    print(f"Tracking time: {total_tracking_time:.3f} s\nFrames: {tracking_frames}")
# Discord bot launcher: loads the token and config, sets up console/file logging,
# builds the commands.Bot with a pretty help command, loads cogs, and runs the bot.
import json
import logging
import os
import time
from datetime import datetime

from discord.ext import commands
from pretty_help import PrettyHelp

# The token is kept in a separate secret file, outside of the JSON config.
with open('token.secret') as fp:
    TOKEN = fp.read().strip()
# NOTE: the redundant close() calls after these with-blocks were removed; the
# context manager already closes the files.

with open('config.json') as file:
    config = json.load(file)
    DESCRIPTION = config["DESCRIPTION"]
    COGS = config["COGS"]
    LOG_AS_FILE = config["LOG_AS_FILE"]
    LOGFORMAT = config["LOGFORMAT"]
    DATEFORMAT = config["DATEFORMAT"]
    PREFIXES = config["PREFIXES"]

bot_uptime = time.monotonic()  # monotonic start time for uptime calculations

log_formatter = logging.Formatter(LOGFORMAT, datefmt=DATEFORMAT)
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)  # use setLevel() rather than assigning .level
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_formatter)
root_logger.addHandler(console_handler)

if LOG_AS_FILE is True:
    try:
        # Rotate the previous run's log into logs\<timestamp>.log.
        now = datetime.now()
        # BUG FIX: the original nested single quotes inside a single-quoted f-string
        # (f'...{now.strftime('%d-%m-%Y %H-%M')}...'), which is a SyntaxError on
        # Python versions before 3.12.
        os.replace('latest.log', f'logs\\{now.strftime("%d-%m-%Y %H-%M")}.log')
    except Exception as e:
        exc = f"{type(e).__name__}: {e}"
        root_logger.error(f'Failed to move latest.log appending!: {exc}')
    file_handler = logging.FileHandler('latest.log')
    file_handler.setFormatter(log_formatter)
    root_logger.addHandler(file_handler)


def get_prefix(bot, message):
    """A callable Prefix for our bot. This could be edited to allow per server prefixes."""
    # If we are in a guild, we allow for the user to mention us or use any of the prefixes in our list.
    return commands.when_mentioned_or(*PREFIXES)(bot, message)


client = commands.Bot(command_prefix=get_prefix, description=DESCRIPTION)
client.help_command = PrettyHelp(color=0xF9006F)

# Load each configured cog; a failure in one cog must not stop the others.
for cog in COGS:
    try:
        client.load_extension("cogs." + cog)
        print(f"Loaded {cog}.")
    except Exception as e:
        exc = f"{type(e).__name__}: {e}"
        print(f"Failed to load Extension {cog}:\n{exc}")

client.run(TOKEN)
"""Entry point for the Discord bot: configure logging, load cogs, start the client."""
import json
import logging
import os
import time
from datetime import datetime

from discord.ext import commands
from pretty_help import PrettyHelp

# Read the bot token from its secret file (kept out of version control).
with open('token.secret') as token_file:
    TOKEN = token_file.read().strip()
token_file.close()

# Pull all runtime settings from the JSON config.
with open('config.json') as cfg_file:
    config = json.load(cfg_file)

DESCRIPTION = config["DESCRIPTION"]
COGS = config["COGS"]
LOG_AS_FILE = config["LOG_AS_FILE"]
LOGFORMAT = config["LOGFORMAT"]
DATEFORMAT = config["DATEFORMAT"]
PREFIXES = config["PREFIXES"]
cfg_file.close()

# Monotonic start time; used to compute bot uptime elsewhere.
bot_uptime = time.monotonic()

log_formatter = logging.Formatter(LOGFORMAT, datefmt=DATEFORMAT)
root_logger = logging.getLogger()
root_logger.level = logging.INFO

console_handler = logging.StreamHandler()
console_handler.setFormatter(log_formatter)
root_logger.addHandler(console_handler)

if LOG_AS_FILE is True:
    # Rotate the previous latest.log into logs\<timestamp>.log, then append
    # a file handler writing to a fresh latest.log.
    try:
        now = datetime.now()
        os.replace('latest.log', f'logs\\{now.strftime("%d-%m-%Y %H-%M")}.log')
    except Exception as err:
        exc = f"{type(err).__name__}: {err}"
        root_logger.error(f'Failed to move latest.log appending!: {exc}')
    file_handler = logging.FileHandler('latest.log')
    file_handler.setFormatter(log_formatter)
    root_logger.addHandler(file_handler)


def get_prefix(bot, message):
    """A callable Prefix for our bot. This could be edited to allow per server prefixes."""
    # If we are in a guild, we allow for the user to mention us or use any of the prefixes in our list.
    return commands.when_mentioned_or(*PREFIXES)(bot, message)


client = commands.Bot(command_prefix=get_prefix, description=DESCRIPTION)
client.help_command = PrettyHelp(color=0xF9006F)

# Best-effort cog loading: report each failure but keep going.
for cog in COGS:
    try:
        client.load_extension("cogs." + cog)
        print(f"Loaded {cog}.")
    except Exception as err:
        exc = f"{type(err).__name__}: {err}"
        print(f"Failed to load Extension {cog}:\n{exc}")

client.run(TOKEN)
"""Module for dealing with the toolbar. """ import os import ee import ipyevents import ipyleaflet import ipywidgets as widgets from ipyfilechooser import FileChooser from IPython.core.display import display from .common import * from .timelapse import * def tool_template(m=None): widget_width = "250px" padding = "0px 0px 0px 5px" # upper, right, bottom, left toolbar_button = widgets.ToggleButton( value=False, tooltip="Toolbar", icon="gear", layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"), ) close_button = widgets.ToggleButton( value=False, tooltip="Close the tool", icon="times", button_style="primary", layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"), ) checkbox = widgets.Checkbox( description="Checkbox", indent=False, layout=widgets.Layout(padding=padding, width=widget_width), ) dropdown = widgets.Dropdown( options=["Option 1", "Option 2", "Option 3"], value=None, description="Dropdown:", layout=widgets.Layout(width=widget_width, padding=padding), style={"description_width": "initial"}, ) int_slider = widgets.IntSlider( min=1, max=100, description="Int Slider: ", readout=False, continuous_update=True, layout=widgets.Layout(width="220px", padding=padding), style={"description_width": "initial"}, ) int_slider_label = widgets.Label() widgets.jslink((int_slider, "value"), (int_slider_label, "value")) float_slider = widgets.FloatSlider( min=1, max=100, description="Float Slider: ", readout=False, continuous_update=True, layout=widgets.Layout(width="220px", padding=padding), style={"description_width": "initial"}, ) float_slider_label = widgets.Label() widgets.jslink((float_slider, "value"), (float_slider_label, "value")) color = widgets.ColorPicker( concise=False, description="Color:", value="white", style={"description_width": "initial"}, layout=widgets.Layout(width=widget_width, padding=padding), ) text = widgets.Text( value="", description="Textbox:", placeholder="Placeholder", style={"description_width": 
"initial"}, layout=widgets.Layout(width=widget_width, padding=padding), ) textarea = widgets.Textarea( placeholder="Placeholder", layout=widgets.Layout(width=widget_width), ) buttons = widgets.ToggleButtons( value=None, options=["Apply", "Reset", "Close"], tooltips=["Apply", "Reset", "Close"], button_style="primary", ) buttons.style.button_width = "80px" output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding)) toolbar_widget = widgets.VBox() toolbar_widget.children = [toolbar_button] toolbar_header = widgets.HBox() toolbar_header.children = [close_button, toolbar_button] toolbar_footer = widgets.VBox() toolbar_footer.children = [ checkbox, widgets.HBox([int_slider, int_slider_label]), widgets.HBox([float_slider, float_slider_label]), dropdown, text, color, textarea, buttons, output, ] toolbar_event = ipyevents.Event( source=toolbar_widget, watched_events=["mouseenter", "mouseleave"] ) def handle_toolbar_event(event): if event["type"] == "mouseenter": toolbar_widget.children = [toolbar_header, toolbar_footer] elif event["type"] == "mouseleave": if not toolbar_button.value: toolbar_widget.children = [toolbar_button] toolbar_button.value = False close_button.value = False toolbar_event.on_dom_event(handle_toolbar_event) def toolbar_btn_click(change): if change["new"]: close_button.value = False toolbar_widget.children = [toolbar_header, toolbar_footer] else: if not close_button.value: toolbar_widget.children = [toolbar_button] toolbar_button.observe(toolbar_btn_click, "value") def close_btn_click(change): if change["new"]: toolbar_button.value = False if m is not None: m.toolbar_reset() if m.tool_control is not None and m.tool_control in m.controls: m.remove_control(m.tool_control) m.tool_control = None toolbar_widget.close() close_button.observe(close_btn_click, "value") def button_clicked(change): if change["new"] == "Apply": with output: output.clear_output() print("Running ...") elif change["new"] == "Reset": textarea.value = "" 
output.clear_output() elif change["new"] == "Close": if m is not None: m.toolbar_reset() if m.tool_control is not None and m.tool_control in m.controls: m.remove_control(m.tool_control) m.tool_control = None toolbar_widget.close() buttons.value = None buttons.observe(button_clicked, "value") toolbar_button.value = True if m is not None: toolbar_control = ipyleaflet.WidgetControl( widget=toolbar_widget, position="topright" ) if toolbar_control not in m.controls: m.add_control(toolbar_control) m.tool_control = toolbar_control else: return toolbar_widget def open_data_widget(m): """A widget for opening local vector/raster data. Args: m (object): geemap.Map """ padding = "0px 0px 0px 5px" style = {"description_width": "initial"} tool_output = widgets.Output() tool_output_ctrl = ipyleaflet.WidgetControl(widget=tool_output, position="topright") if m.tool_output_ctrl is not None and m.tool_output_ctrl in m.controls: m.remove_control(m.tool_output_ctrl) file_type = widgets.ToggleButtons( options=["Shapefile", "GeoJSON", "CSV", "Vector", "Raster"], tooltips=[ "Open a shapefile", "Open a GeoJSON file", "Open a vector dataset", "Create points from CSV", "Open a vector dataset", "Open a raster dataset", ], ) file_type.style.button_width = "88px" filepath = widgets.Text( value="", description="File path or http URL:", tooltip="Enter a file path or http URL to vector data", style=style, layout=widgets.Layout(width="454px", padding=padding), ) http_widget = widgets.HBox() file_chooser = FileChooser( os.getcwd(), sandbox_path=m.sandbox_path, layout=widgets.Layout(width="454px") ) file_chooser.filter_pattern = "*.shp" file_chooser.use_dir_icons = True style = {"description_width": "initial"} layer_name = widgets.Text( value="Shapefile", description="Enter a layer name:", tooltip="Enter a layer name for the selected file", style=style, layout=widgets.Layout(width="454px", padding="0px 0px 0px 5px"), ) longitude = widgets.Dropdown( options=[], value=None, description="Longitude:", 
layout=widgets.Layout(width="149px", padding="0px 0px 0px 5px"), style={"description_width": "initial"}, ) latitude = widgets.Dropdown( options=[], value=None, description="Latitude:", layout=widgets.Layout(width="149px", padding="0px 0px 0px 5px"), style={"description_width": "initial"}, ) label = widgets.Dropdown( options=[], value=None, description="Label:", layout=widgets.Layout(width="149px", padding="0px 0px 0px 5px"), style={"description_width": "initial"}, ) csv_widget = widgets.HBox() convert_bool = widgets.Checkbox( description="Convert to ee.FeatureCollection?", indent=False, layout=widgets.Layout(padding="0px 0px 0px 5px"), ) convert_hbox = widgets.HBox([convert_bool]) ok_cancel = widgets.ToggleButtons( value=None, options=["Apply", "Reset", "Close"], tooltips=["Apply", "Reset", "Close"], button_style="primary", ) # ok_cancel.style.button_width = "133px" bands = widgets.Text( value=None, description="Band:", tooltip="Enter a list of band indices", style=style, layout=widgets.Layout(width="150px", padding=padding), ) vmin = widgets.Text( value=None, description="vmin:", tooltip="Minimum value of the raster to visualize", style=style, layout=widgets.Layout(width="148px"), ) vmax = widgets.Text( value=None, description="vmax:", tooltip="Maximum value of the raster to visualize", style=style, layout=widgets.Layout(width="148px"), ) nodata = widgets.Text( value=None, description="Nodata:", tooltip="Nodata the raster to visualize", style=style, layout=widgets.Layout(width="150px", padding=padding), ) palette = widgets.Dropdown( options=[], value=None, description="palette:", layout=widgets.Layout(width="300px"), style=style, ) raster_options = widgets.VBox() main_widget = widgets.VBox( [ file_type, file_chooser, http_widget, csv_widget, layer_name, convert_hbox, raster_options, ok_cancel, ] ) tool_output.clear_output() with tool_output: display(main_widget) def bands_changed(change): if change["new"] and "," in change["owner"].value: palette.value = None 
palette.disabled = True else: palette.disabled = False bands.observe(bands_changed, "value") def chooser_callback(chooser): filepath.value = file_chooser.selected if file_type.value == "CSV": import pandas as pd df = pd.read_csv(filepath.value) col_names = df.columns.values.tolist() longitude.options = col_names latitude.options = col_names label.options = col_names if "longitude" in col_names: longitude.value = "longitude" if "latitude" in col_names: latitude.value = "latitude" if "name" in col_names: label.value = "name" file_chooser.register_callback(chooser_callback) def file_type_changed(change): ok_cancel.value = None file_chooser.default_path = os.getcwd() file_chooser.reset() layer_name.value = file_type.value csv_widget.children = [] filepath.value = "" if change["new"] == "Shapefile": file_chooser.filter_pattern = "*.shp" raster_options.children = [] convert_hbox.children = [convert_bool] http_widget.children = [] elif change["new"] == "GeoJSON": file_chooser.filter_pattern = "*.geojson" raster_options.children = [] convert_hbox.children = [convert_bool] http_widget.children = [filepath] elif change["new"] == "Vector": file_chooser.filter_pattern = "*.*" raster_options.children = [] convert_hbox.children = [convert_bool] http_widget.children = [filepath] elif change["new"] == "CSV": file_chooser.filter_pattern = ["*.csv", "*.CSV"] csv_widget.children = [longitude, latitude, label] raster_options.children = [] convert_hbox.children = [convert_bool] http_widget.children = [filepath] elif change["new"] == "Raster": file_chooser.filter_pattern = ["*.tif", "*.img"] palette.options = get_palettable(types=["matplotlib", "cartocolors"]) palette.value = None raster_options.children = [ widgets.HBox([bands, vmin, vmax]), widgets.HBox([nodata, palette]), ] convert_hbox.children = [] http_widget.children = [filepath] def ok_cancel_clicked(change): if change["new"] == "Apply": m.default_style = {"cursor": "wait"} file_path = filepath.value if file_path is not None: 
ext = os.path.splitext(file_path)[1] with tool_output: if ext.lower() == ".shp": if convert_bool.value: ee_object = shp_to_ee(file_path) m.addLayer(ee_object, {}, layer_name.value) else: m.add_shapefile( file_path, style={}, layer_name=layer_name.value ) elif ext.lower() == ".geojson": if convert_bool.value: ee_object = geojson_to_ee(file_path) m.addLayer(ee_object, {}, layer_name.value) else: m.add_geojson( file_path, style={}, layer_name=layer_name.value ) elif ext.lower() == ".csv": if convert_bool.value: ee_object = csv_to_ee( file_path, latitude.value, longitude.value ) m.addLayer(ee_object, {}, layer_name.value) else: m.add_xy_data( file_path, x=longitude.value, y=latitude.value, label=label.value, layer_name=layer_name.value, ) elif ext.lower() in [".tif", "img"] and file_type.value == "Raster": band = None vis_min = None vis_max = None vis_nodata = None try: if len(bands.value) > 0: band = int(bands.value) if len(vmin.value) > 0: vis_min = float(vmin.value) if len(vmax.value) > 0: vis_max = float(vmax.value) if len(nodata.value) > 0: vis_nodata = float(nodata.value) except: pass m.add_local_tile( file_path, layer_name=layer_name.value, band=band, palette=palette.value, vmin=vis_min, vmax=vis_max, nodata=vis_nodata, ) else: m.add_vector(file_path, style={}, layer_name=layer_name.value) else: print("Please select a file to open.") m.toolbar_reset() m.default_style = {"cursor": "default"} elif change["new"] == "Reset": file_chooser.reset() tool_output.clear_output() with tool_output: display(main_widget) m.toolbar_reset() elif change["new"] == "Close": if m.tool_output_ctrl is not None and m.tool_output_ctrl in m.controls: m.remove_control(m.tool_output_ctrl) m.tool_output_ctrl = None m.toolbar_reset() ok_cancel.value = None file_type.observe(file_type_changed, names="value") ok_cancel.observe(ok_cancel_clicked, names="value") # file_chooser.register_callback(chooser_callback) m.add_control(tool_output_ctrl) m.tool_output_ctrl = tool_output_ctrl def 
change_basemap(m): """Widget for change basemaps. Args: m (object): geemap.Map() """ from .basemaps import _ee_basemaps dropdown = widgets.Dropdown( options=list(_ee_basemaps.keys()), value="ROADMAP", layout=widgets.Layout(width="200px") # description="Basemaps", ) close_btn = widgets.Button( icon="times", tooltip="Close the basemap widget", button_style="primary", layout=widgets.Layout(width="32px"), ) basemap_widget = widgets.HBox([dropdown, close_btn]) def on_click(change): basemap_name = change["new"] if len(m.layers) == 1: old_basemap = m.layers[0] else: old_basemap = m.layers[1] m.substitute_layer(old_basemap, _ee_basemaps[basemap_name]) dropdown.observe(on_click, "value") def close_click(change): m.toolbar_reset() if m.basemap_ctrl is not None and m.basemap_ctrl in m.controls: m.remove_control(m.basemap_ctrl) basemap_widget.close() close_btn.on_click(close_click) basemap_control = ipyleaflet.WidgetControl( widget=basemap_widget, position="topright" ) m.add_control(basemap_control) m.basemap_ctrl = basemap_control def convert_js2py(m): """A widget for converting Earth Engine JavaScript to Python. 
Args: m (object): geemap.Map """ full_widget = widgets.VBox(layout=widgets.Layout(width="465px", height="350px")) text_widget = widgets.Textarea( placeholder="Paste your Earth Engine JavaScript into this textbox and click the Convert button below to convert the Javascript to Python", layout=widgets.Layout(width="455px", height="310px"), ) buttons = widgets.ToggleButtons( value=None, options=["Convert", "Clear", "Close"], tooltips=["Convert", "Clear", "Close"], button_style="primary", ) buttons.style.button_width = "142px" def button_clicked(change): if change["new"] == "Convert": from .conversion import create_new_cell, js_snippet_to_py if len(text_widget.value) > 0: out_lines = js_snippet_to_py( text_widget.value, add_new_cell=False, import_ee=False, import_geemap=False, show_map=False, ) if len(out_lines) > 0 and len(out_lines[0].strip()) == 0: out_lines = out_lines[1:] text_widget.value = "".join(out_lines) create_code_cell(text_widget.value) elif change["new"] == "Clear": text_widget.value = "" elif change["new"] == "Close": m.toolbar_reset() if m.convert_ctrl is not None and m.convert_ctrl in m.controls: m.remove_control(m.convert_ctrl) full_widget.close() buttons.value = None buttons.observe(button_clicked, "value") full_widget.children = [text_widget, buttons] widget_control = ipyleaflet.WidgetControl(widget=full_widget, position="topright") m.add_control(widget_control) m.convert_ctrl = widget_control def collect_samples(m): full_widget = widgets.VBox() layout = widgets.Layout(width="100px") prop_label = widgets.Label( value="Property", layout=widgets.Layout(display="flex", justify_content="center", width="100px"), ) value_label = widgets.Label( value="Value", layout=widgets.Layout(display="flex", justify_content="center", width="100px"), ) color_label = widgets.Label( value="Color", layout=widgets.Layout(display="flex", justify_content="center", width="100px"), ) prop_text1 = widgets.Text(layout=layout, placeholder="Required") value_text1 = 
widgets.Text(layout=layout, placeholder="Integer") prop_text2 = widgets.Text(layout=layout, placeholder="Optional") value_text2 = widgets.Text(layout=layout, placeholder="String") color = widgets.ColorPicker( concise=False, value="#3388ff", layout=layout, style={"description_width": "initial"}, ) buttons = widgets.ToggleButtons( value=None, options=["Apply", "Clear", "Close"], tooltips=["Apply", "Clear", "Close"], button_style="primary", ) buttons.style.button_width = "99px" def button_clicked(change): if change["new"] == "Apply": if len(color.value) != 7: color.value = "#3388ff" draw_control = ipyleaflet.DrawControl( marker={"shapeOptions": {"color": color.value}, "repeatMode": True}, rectangle={"shapeOptions": {"color": color.value}, "repeatMode": True}, polygon={"shapeOptions": {"color": color.value}, "repeatMode": True}, circlemarker={}, polyline={}, edit=False, remove=False, ) controls = [] old_draw_control = None for control in m.controls: if isinstance(control, ipyleaflet.DrawControl): controls.append(draw_control) old_draw_control = control else: controls.append(control) m.controls = tuple(controls) old_draw_control.close() m.draw_control = draw_control train_props = {} if prop_text1.value != "" and value_text1.value != "": try: _ = int(value_text1.value) except Exception as _: value_text1.placeholder = "Integer only" value_text1.value = "" return train_props[prop_text1.value] = int(value_text1.value) if prop_text2.value != "" and value_text2.value != "": train_props[prop_text2.value] = value_text2.value if color.value != "": train_props["color"] = color.value # Handles draw events def handle_draw(target, action, geo_json): from .geemap import ee_tile_layer try: geom = geojson_to_ee(geo_json, False) m.user_roi = geom if len(train_props) > 0: feature = ee.Feature(geom, train_props) else: feature = ee.Feature(geom) m.draw_last_json = geo_json m.draw_last_feature = feature if action == "deleted" and len(m.draw_features) > 0: m.draw_features.remove(feature) 
m.draw_count -= 1 else: m.draw_features.append(feature) m.draw_count += 1 collection = ee.FeatureCollection(m.draw_features) m.user_rois = collection ee_draw_layer = ee_tile_layer( collection, {"color": "blue"}, "Drawn Features", False, 0.5 ) draw_layer_index = m.find_layer_index("Drawn Features") if draw_layer_index == -1: m.add_layer(ee_draw_layer) m.draw_layer = ee_draw_layer else: m.substitute_layer(m.draw_layer, ee_draw_layer) m.draw_layer = ee_draw_layer except Exception as e: m.draw_count = 0 m.draw_features = [] m.draw_last_feature = None m.draw_layer = None m.user_roi = None m.roi_start = False m.roi_end = False print("There was an error creating Earth Engine Feature.") raise Exception(e) draw_control.on_draw(handle_draw) elif change["new"] == "Clear": prop_text1.value = "" value_text1.value = "" prop_text2.value = "" value_text2.value = "" color.value = "#3388ff" elif change["new"] == "Close": m.toolbar_reset() if m.training_ctrl is not None and m.training_ctrl in m.controls: m.remove_control(m.training_ctrl) full_widget.close() buttons.value = None buttons.observe(button_clicked, "value") full_widget.children = [ widgets.HBox([prop_label, value_label, color_label]), widgets.HBox([prop_text1, value_text1, color]), widgets.HBox([prop_text2, value_text2, color]), buttons, ] widget_control = ipyleaflet.WidgetControl(widget=full_widget, position="topright") m.add_control(widget_control) m.training_ctrl = widget_control def get_tools_dict(): import pandas as pd import pkg_resources pkg_dir = os.path.dirname(pkg_resources.resource_filename("geemap", "geemap.py")) toolbox_csv = os.path.join(pkg_dir, "data/template/toolbox.csv") df = pd.read_csv(toolbox_csv).set_index("index") tools_dict = df.to_dict("index") return tools_dict def tool_gui(tool_dict, max_width="420px", max_height="600px"): """Create a GUI for a tool based on the tool dictionary. Args: tool_dict (dict): The dictionary containing the tool info. 
max_width (str, optional): The max width of the tool dialog. max_height (str, optional): The max height of the tool dialog. Returns: object: An ipywidget object representing the tool interface. """ tool_widget = widgets.VBox( layout=widgets.Layout(max_width=max_width, max_height=max_height) ) children = [] args = {} required_inputs = [] style = {"description_width": "initial"} max_width = str(int(max_width.replace("px", "")) - 10) + "px" header_width = str(int(max_width.replace("px", "")) - 104) + "px" header = widgets.Label( value=f'Current Tool: {tool_dict['label']}', style=style, layout=widgets.Layout(width=header_width), ) code_btn = widgets.Button( description="View Code", layout=widgets.Layout(width="100px") ) children.append(widgets.HBox([header, code_btn])) desc = widgets.Textarea( value=f'Description: {tool_dict['description']}', layout=widgets.Layout(width="410px", max_width=max_width), disabled=True, ) children.append(desc) run_btn = widgets.Button(description="Run", layout=widgets.Layout(width="100px")) cancel_btn = widgets.Button( description="Cancel", layout=widgets.Layout(width="100px") ) help_btn = widgets.Button(description="Help", layout=widgets.Layout(width="100px")) import_btn = widgets.Button( description="Import", tooltip="Import the script to a new cell", layout=widgets.Layout(width="98px"), ) tool_output = widgets.Output(layout=widgets.Layout(max_height="200px")) children.append(widgets.HBox([run_btn, cancel_btn, help_btn, import_btn])) children.append(tool_output) tool_widget.children = children def run_button_clicked(b): tool_output.clear_output() required_params = required_inputs.copy() args2 = [] for arg in args: line = "" if isinstance(args[arg], FileChooser): if arg in required_params and args[arg].selected is None: with tool_output: print(f"Please provide inputs for required parameters.") break elif arg in required_params: required_params.remove(arg) if arg == "i": line = f"-{arg}={args[arg].selected}" else: line = 
f"--{arg}={args[arg].selected}" elif isinstance(args[arg], widgets.Text): if arg in required_params and len(args[arg].value) == 0: with tool_output: print(f"Please provide inputs for required parameters.") break elif arg in required_params: required_params.remove(arg) if args[arg].value is not None and len(args[arg].value) > 0: line = f"--{arg}={args[arg].value}" elif isinstance(args[arg], widgets.Checkbox): line = f"--{arg}={args[arg].value}" args2.append(line) if len(required_params) == 0: with tool_output: # wbt.run_tool(tool_dict["name"], args2) pass def help_button_clicked(b): import webbrowser tool_output.clear_output() with tool_output: html = widgets.HTML( value=f'<a href={tool_dict['link']} target="_blank">{tool_dict['link']}</a>' ) display(html) webbrowser.open_new_tab(tool_dict["link"]) def code_button_clicked(b): import webbrowser with tool_output: html = widgets.HTML( value=f'<a href={tool_dict['link']} target="_blank">{tool_dict['link']}</a>' ) display(html) webbrowser.open_new_tab(tool_dict["link"]) def cancel_btn_clicked(b): tool_output.clear_output() def import_button_clicked(b): tool_output.clear_output() content = [] create_code_cell("\n".join(content)) import_btn.on_click(import_button_clicked) run_btn.on_click(run_button_clicked) help_btn.on_click(help_button_clicked) code_btn.on_click(code_button_clicked) cancel_btn.on_click(cancel_btn_clicked) return tool_widget def build_toolbox(tools_dict, max_width="1080px", max_height="600px"): """Build the GEE toolbox. Args: tools_dict (dict): A dictionary containing information for all tools. max_width (str, optional): The maximum width of the widget. max_height (str, optional): The maximum height of the widget. Returns: object: An ipywidget representing the toolbox. 
""" left_widget = widgets.VBox(layout=widgets.Layout(min_width="175px")) center_widget = widgets.VBox( layout=widgets.Layout(min_width="200px", max_width="200px") ) right_widget = widgets.Output( layout=widgets.Layout(width="630px", max_height=max_height) ) full_widget = widgets.HBox( [left_widget, center_widget, right_widget], layout=widgets.Layout(max_width=max_width, max_height=max_height), ) search_widget = widgets.Text( placeholder="Search tools ...", layout=widgets.Layout(width="170px") ) label_widget = widgets.Label(layout=widgets.Layout(width="170px")) label_widget.value = f"{len(tools_dict)} Available Tools" close_btn = widgets.Button( description="Close Toolbox", icon="close", layout=widgets.Layout(width="170px") ) categories = {} categories["All Tools"] = [] for key in tools_dict.keys(): category = tools_dict[key]["category"] if category not in categories.keys(): categories[category] = [] categories[category].append(tools_dict[key]["name"]) categories["All Tools"].append(tools_dict[key]["name"]) options = list(categories.keys()) all_tools = categories["All Tools"] all_tools.sort() category_widget = widgets.Select( options=options, layout=widgets.Layout(width="170px", height="165px") ) tools_widget = widgets.Select( options=[], layout=widgets.Layout(width="195px", height="400px") ) def category_selected(change): if change["new"]: selected = change["owner"].value options = categories[selected] options.sort() tools_widget.options = options label_widget.value = f"{len(options)} Available Tools" category_widget.observe(category_selected, "value") def tool_selected(change): if change["new"]: selected = change["owner"].value tool_dict = tools_dict[selected] with right_widget: right_widget.clear_output() display(tool_gui(tool_dict, max_height=max_height)) tools_widget.observe(tool_selected, "value") def search_changed(change): if change["new"]: keyword = change["owner"].value if len(keyword) > 0: selected_tools = [] for tool in all_tools: if keyword.lower() in 
tool.lower(): selected_tools.append(tool) if len(selected_tools) > 0: tools_widget.options = selected_tools label_widget.value = f"{len(selected_tools)} Available Tools" else: tools_widget.options = all_tools label_widget.value = f"{len(tools_dict)} Available Tools" search_widget.observe(search_changed, "value") def close_btn_clicked(b): full_widget.close() close_btn.on_click(close_btn_clicked) category_widget.value = list(categories.keys())[0] tools_widget.options = all_tools left_widget.children = [category_widget, search_widget, label_widget, close_btn] center_widget.children = [tools_widget] return full_widget def timelapse_gui(m=None): """Creates timelapse animations. Args: m (geemap.Map, optional): A geemap Map instance. Defaults to None. Returns: ipywidgets: The interactive GUI. """ if m is not None: m.add_basemap("HYBRID") widget_width = "350px" padding = "0px 0px 0px 5px" # upper, right, bottom, left style = {"description_width": "initial"} toolbar_button = widgets.ToggleButton( value=False, tooltip="Toolbar", icon="gear", layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"), ) close_button = widgets.ToggleButton( value=False, tooltip="Close the tool", icon="times", button_style="primary", layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"), ) collection = widgets.Dropdown( options=[ "Landsat TM-ETM-OLI Surface Reflectance", "Sentinel-2AB Surface Reflectance", "MODIS", ], value="Landsat TM-ETM-OLI Surface Reflectance", description="Collection:", layout=widgets.Layout(width=widget_width, padding=padding), style=style, ) title = widgets.Text( value="Timelapse", description="Title:", style=style, layout=widgets.Layout(width="181px", padding=padding), ) bands = widgets.Dropdown( description="RGB:", options=[ "Red/Green/Blue", "NIR/Red/Green", "SWIR2/SWIR1/NIR", "NIR/SWIR1/Red", "SWIR2/NIR/Red", "SWIR2/SWIR1/Red", "SWIR1/NIR/Blue", "NIR/SWIR1/Blue", "SWIR2/NIR/Green", "SWIR1/NIR/Red", ], value="NIR/Red/Green", 
style=style, layout=widgets.Layout(width="165px", padding=padding), ) speed = widgets.IntSlider( description="Frames/sec:", tooltip="Frames per second", value=10, min=1, max=30, readout=False, style=style, layout=widgets.Layout(width="142px", padding=padding), ) speed_label = widgets.Label( layout=widgets.Layout(width="20px", padding=padding), ) widgets.jslink((speed, "value"), (speed_label, "value")) cloud = widgets.Checkbox( value=True, description="Apply fmask (remove clouds, shadows, snow)", tooltip="Apply fmask (remove clouds, shadows, snow)", style=style, ) start_year = widgets.IntSlider( description="Start Year:", value=1984, min=1984, max=2021, readout=False, style=style, layout=widgets.Layout(width="138px", padding=padding), ) start_year_label = widgets.Label() widgets.jslink((start_year, "value"), (start_year_label, "value")) end_year = widgets.IntSlider( description="End Year:", value=2020, min=1984, max=2021, readout=False, style=style, layout=widgets.Layout(width="138px", padding=padding), ) end_year_label = widgets.Label() widgets.jslink((end_year, "value"), (end_year_label, "value")) start_month = widgets.IntSlider( description="Start Month:", value=5, min=1, max=12, readout=False, style=style, layout=widgets.Layout(width="145px", padding=padding), ) start_month_label = widgets.Label( layout=widgets.Layout(width="20px", padding=padding), ) widgets.jslink((start_month, "value"), (start_month_label, "value")) end_month = widgets.IntSlider( description="End Month:", value=10, min=1, max=12, readout=False, style=style, layout=widgets.Layout(width="155px", padding=padding), ) end_month_label = widgets.Label() widgets.jslink((end_month, "value"), (end_month_label, "value")) font_size = widgets.IntSlider( description="Font size:", value=30, min=10, max=50, readout=False, style=style, layout=widgets.Layout(width="152px", padding=padding), ) font_size_label = widgets.Label() widgets.jslink((font_size, "value"), (font_size_label, "value")) font_color = 
widgets.ColorPicker( concise=False, description="Font color:", value="white", style=style, layout=widgets.Layout(width="170px", padding=padding), ) progress_bar_color = widgets.ColorPicker( concise=False, description="Progress bar:", value="blue", style=style, layout=widgets.Layout(width="180px", padding=padding), ) # Normalized Satellite Indices: https://www.usna.edu/Users/oceano/pguth/md_help/html/norm_sat.htm nd_options = [ "Vegetation Index (NDVI)", "Water Index (NDWI)", "Modified Water Index (MNDWI)", "Snow Index (NDSI)", "Soil Index (NDSI)", "Burn Ratio (NBR)", "Customized", ] nd_indices = widgets.Dropdown( options=nd_options, value=None, description="Normalized Difference Index:", style=style, layout=widgets.Layout(width="347px", padding=padding), ) first_band = widgets.Dropdown( description="1st band:", options=["Blue", "Green", "Red", "NIR", "SWIR1", "SWIR2"], value=None, style=style, layout=widgets.Layout(width="171px", padding=padding), ) second_band = widgets.Dropdown( description="2nd band:", options=["Blue", "Green", "Red", "NIR", "SWIR1", "SWIR2"], value=None, style=style, layout=widgets.Layout(width="172px", padding=padding), ) nd_threshold = widgets.FloatSlider( value=0, min=-1, max=1, step=0.01, description="Threshold:", orientation="horizontal", readout=False, style=style, layout=widgets.Layout(width="159px", padding=padding), ) nd_threshold_label = widgets.Label( layout=widgets.Layout(width="35px", padding=padding), ) widgets.jslink((nd_threshold, "value"), (nd_threshold_label, "value")) nd_color = widgets.ColorPicker( concise=False, description="Color:", value="blue", style=style, layout=widgets.Layout(width="145px", padding=padding), ) def nd_index_change(change): if nd_indices.value == "Vegetation Index (NDVI)": first_band.value = "NIR" second_band.value = "Red" elif nd_indices.value == "Water Index (NDWI)": first_band.value = "NIR" second_band.value = "SWIR1" elif nd_indices.value == "Modified Water Index (MNDWI)": first_band.value = "Green" 
second_band.value = "SWIR1" elif nd_indices.value == "Snow Index (NDSI)": first_band.value = "Green" second_band.value = "SWIR1" elif nd_indices.value == "Soil Index (NDSI)": first_band.value = "SWIR1" second_band.value = "NIR" elif nd_indices.value == "Burn Ratio (NBR)": first_band.value = "NIR" second_band.value = "SWIR2" elif nd_indices.value == "Customized": first_band.value = None second_band.value = None nd_indices.observe(nd_index_change, names="value") button_width = "113px" create_gif = widgets.Button( description="Create timelapse", button_style="primary", tooltip="Click to create timelapse", style=style, layout=widgets.Layout(padding="0px", width=button_width), ) def submit_clicked(b): if start_year.value > end_year.value: print("The end year must be great than the start year.") return if start_month.value > end_month.value: print("The end month must be great than the start month.") return if start_year.value == end_year.value: add_progress_bar = False else: add_progress_bar = True start_date = str(start_month.value).zfill(2) + "-01" end_date = str(end_month.value).zfill(2) + "-30" with output: print("Computing... 
Please wait...") nd_bands = None if (first_band.value is not None) and (second_band.value is not None): nd_bands = [first_band.value, second_band.value] temp_output = widgets.Output() if m is not None: out_dir = os.path.expanduser("~/Downloads") if not os.path.exists(out_dir): os.makedirs(out_dir) out_gif = os.path.join(out_dir, "timelapse_" + random_string(3) + ".gif") with temp_output: temp_output.clear_output() m.add_landsat_ts_gif( roi=m.user_roi, label=title.value, start_year=start_year.value, end_year=end_year.value, start_date=start_date, end_date=end_date, bands=bands.value.split("/"), font_color=font_color.value, frames_per_second=speed.value, font_size=font_size.value, add_progress_bar=add_progress_bar, progress_bar_color=progress_bar_color.value, out_gif=out_gif, apply_fmask=cloud.value, nd_bands=nd_bands, nd_threshold=nd_threshold.value, nd_palette=["black", nd_color.value], ) if m.user_roi is not None: m.centerObject(m.user_roi) with output: print("The timelapse has been added to the map.") link = create_download_link( out_gif, title="Click here to download: ", ) display(link) if nd_bands is not None: link_nd = create_download_link( out_gif.replace(".gif", "_nd.gif"), title="Click here to download: ", ) display(link_nd) create_gif.on_click(submit_clicked) reset_btn = widgets.Button( description="Reset", button_style="primary", style=style, layout=widgets.Layout(padding="0px", width=button_width), ) def reset_btn_click(change): output.clear_output() reset_btn.on_click(reset_btn_click) close_btn = widgets.Button( description="Close", button_style="primary", style=style, layout=widgets.Layout(padding="0px", width=button_width), ) def close_click(change): if m is not None: m.toolbar_reset() if m.tool_control is not None and m.tool_control in m.controls: m.remove_control(m.tool_control) m.tool_control = None toolbar_widget.close() close_btn.on_click(close_click) output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding)) 
toolbar_widget = widgets.VBox() toolbar_widget.children = [toolbar_button] toolbar_header = widgets.HBox() toolbar_header.children = [close_button, toolbar_button] toolbar_footer = widgets.VBox() toolbar_footer.children = [ collection, widgets.HBox([title, bands]), widgets.HBox([speed, speed_label, progress_bar_color]), widgets.HBox([start_year, start_year_label, end_year, end_year_label]), widgets.HBox([start_month, start_month_label, end_month, end_month_label]), widgets.HBox([font_size, font_size_label, font_color]), cloud, nd_indices, widgets.HBox([first_band, second_band]), widgets.HBox([nd_threshold, nd_threshold_label, nd_color]), widgets.HBox([create_gif, reset_btn, close_btn]), output, ] toolbar_event = ipyevents.Event( source=toolbar_widget, watched_events=["mouseenter", "mouseleave"] ) def handle_toolbar_event(event): if event["type"] == "mouseenter": toolbar_widget.children = [toolbar_header, toolbar_footer] elif event["type"] == "mouseleave": if not toolbar_button.value: toolbar_widget.children = [toolbar_button] toolbar_button.value = False close_button.value = False toolbar_event.on_dom_event(handle_toolbar_event) def toolbar_btn_click(change): if change["new"]: close_button.value = False toolbar_widget.children = [toolbar_header, toolbar_footer] else: if not close_button.value: toolbar_widget.children = [toolbar_button] toolbar_button.observe(toolbar_btn_click, "value") def close_btn_click(change): if change["new"]: toolbar_button.value = False if m is not None: if m.tool_control is not None and m.tool_control in m.controls: m.remove_control(m.tool_control) m.tool_control = None m.toolbar_reset() toolbar_widget.close() close_button.observe(close_btn_click, "value") toolbar_button.value = True if m is not None: toolbar_control = ipyleaflet.WidgetControl( widget=toolbar_widget, position="topright" ) if toolbar_control not in m.controls: m.add_control(toolbar_control) m.tool_control = toolbar_control else: return toolbar_widget def time_slider(m=None): 
"""Creates a time slider for visualizing any ee.ImageCollection. Args: m (geemap.Map, optional): A geemap Map instance. Defaults to None. Returns: ipywidgets: The interactive GUI. """ import matplotlib as mpl import matplotlib.pyplot as plt widget_width = "350px" padding = "0px 0px 0px 5px" # upper, right, bottom, left style = {"description_width": "initial"} toolbar_button = widgets.ToggleButton( value=False, tooltip="Toolbar", icon="fast-forward", layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"), ) close_button = widgets.ToggleButton( value=False, tooltip="Close the tool", icon="times", button_style="primary", layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"), ) col_options_dict = { "Landsat TM-ETM-OLI Surface Reflectance": { "min": 0, "max": 4000, "bands": ["NIR", "Red", "Green"], "start_year": 1984, "end_year": 2021, "bandnames": ["Blue", "Green", "Red", "NIR", "SWIR1", "SWIR2", "pixel_qa"], }, "MOD13A2.006 Terra Vegetation Indices": { "min": 0, "max": 9000, "start_year": 2000, "end_year": 2021, "palette": [ "FFFFFF", "CE7E45", "DF923D", "F1B555", "FCD163", "99B718", "74A901", "66A000", "529400", "3E8601", "207401", "056201", "004C00", "023B01", "012E01", "011D01", "011301", ], }, "Sentinel-2 Surface Relectance": { "min": 0, "max": 4000, "bands": ["NIR", "Red", "Green"], "start_year": 2015, "end_year": 2021, "bandnames": [ "Blue", "Green", "Red", "Red Edge 1", "Red Edge 2", "Red Edge 3", "NIR", "Red Edge 4", "SWIR1", "SWIR2", "QA60", ], }, "USDA NAIP Imagery": { "min": 0, "max": 255, "bands": ["R", "G", "B"], "start_year": 2003, "end_year": 2021, "bandnames": ["R", "G", "B", "N"], }, } col_options = list(col_options_dict.keys()) if m is not None: col_options += m.ee_raster_layer_names collection = widgets.Dropdown( options=col_options, value=col_options[0], description="Time series:", layout=widgets.Layout(width=widget_width, padding=padding), style=style, ) region = widgets.Dropdown( options=["User-drawn 
ROI"] + m.ee_vector_layer_names, value="User-drawn ROI", description="Region:", layout=widgets.Layout(width=widget_width, padding=padding), style=style, ) dropdown_width = "97px" landsat_bands = ["Blue", "Green", "Red", "NIR", "SWIR1", "SWIR2", "pixel_qa"] band1_dropdown = widgets.Dropdown( options=landsat_bands, value="NIR", layout=widgets.Layout(width=dropdown_width), ) band2_dropdown = widgets.Dropdown( options=landsat_bands, value="Red", layout=widgets.Layout(width=dropdown_width), ) band3_dropdown = widgets.Dropdown( options=landsat_bands, value="Green", layout=widgets.Layout(width=dropdown_width), ) bands_label = widgets.Label("Bands:", layout=widgets.Layout(padding=padding)) bands_hbox = widgets.HBox( [bands_label, band1_dropdown, band2_dropdown, band3_dropdown] ) vis = widgets.Text( value="", description="Vis min value:", placeholder="{'min': 0, 'max': 1, 'palette': ['red', 'blue']}", style=style, layout=widgets.Layout(width=widget_width, padding=padding), ) vis_min = widgets.Text( value="0", description="Vis min value:", style=style, layout=widgets.Layout(width="172px", padding=padding), ) vis_max = widgets.Text( value="4000", description="Vis max value:", style=style, layout=widgets.Layout(width="172px", padding=padding), ) opacity = widgets.FloatSlider( value=1, min=0, max=1, step=0.01, description="Opacity:", continuous_update=True, readout=False, readout_format=".2f", layout=widgets.Layout(width="130px", padding=padding), style={"description_width": "50px"}, ) opacity_label = widgets.Label(layout=widgets.Layout(width="40px", padding=padding)) widgets.jslink((opacity, "value"), (opacity_label, "value")) gamma = widgets.FloatSlider( value=1, min=0.1, max=10, step=0.01, description="Gamma:", continuous_update=True, readout=False, readout_format=".2f", layout=widgets.Layout(width="123px", padding=padding), style={"description_width": "50px"}, ) gamma_label = widgets.Label(layout=widgets.Layout(width="40px", padding=padding)) widgets.jslink((gamma, 
"value"), (gamma_label, "value")) color_picker = widgets.ColorPicker( concise=False, value="#000000", layout=widgets.Layout(width="97px"), style={"description_width": "initial"}, ) add_color = widgets.Button( icon="plus", tooltip="Add a hex color string to the palette", layout=widgets.Layout(width="32px"), ) del_color = widgets.Button( icon="minus", tooltip="Remove a hex color string from the palette", layout=widgets.Layout(width="32px"), ) reset_color = widgets.Button( icon="eraser", tooltip="Remove all color strings from the palette", layout=widgets.Layout(width="34px"), ) classes = widgets.Dropdown( options=["Any"] + [str(i) for i in range(3, 13)], description="Classes:", layout=widgets.Layout(width="150px", padding=padding), style={"description_width": "initial"}, ) colormap = widgets.Dropdown( options=plt.colormaps(), value=None, description="Colormap:", layout=widgets.Layout(width="195px", padding=padding), style={"description_width": "initial"}, ) def classes_changed(change): if change["new"]: selected = change["owner"].value if colormap.value is not None: n_class = None if selected != "Any": n_class = int(classes.value) colors = plt.cm.get_cmap(colormap.value, n_class) cmap_colors = [ mpl.colors.rgb2hex(colors(i))[1:] for i in range(colors.N) ] _, ax = plt.subplots(figsize=(6, 0.4)) cmap = mpl.colors.LinearSegmentedColormap.from_list( "custom", to_hex_colors(cmap_colors), N=256 ) vmin = 0 vmax = 1 try: if vis_min.value != "": vmin = float(vis_min.value) if vis_max.value != "": vmax = float(vis_max.value) except Exception as _: pass norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax) mpl.colorbar.ColorbarBase( ax, norm=norm, cmap=cmap, orientation="horizontal" ) palette.value = ", ".join([color for color in cmap_colors]) if m.colorbar_widget is None: m.colorbar_widget = widgets.Output( layout=widgets.Layout(height="60px") ) if m.colorbar_ctrl is None: m.colorbar_ctrl = ipyleaflet.WidgetControl( widget=m.colorbar_widget, position="bottomright" ) 
m.add_control(m.colorbar_ctrl) colorbar_output = m.colorbar_widget with colorbar_output: colorbar_output.clear_output() plt.show() classes.observe(classes_changed, "value") palette = widgets.Text( value="", placeholder="", description="Palette:", tooltip="Enter a list of hex color code (RRGGBB)", layout=widgets.Layout(width="137px", padding=padding), style={"description_width": "initial"}, ) def add_color_clicked(b): if color_picker.value is not None: if len(palette.value) == 0: palette.value = color_picker.value[1:] else: palette.value += ", " + color_picker.value[1:] def del_color_clicked(b): if "," in palette.value: items = [item.strip() for item in palette.value.split(",")] palette.value = ", ".join(items[:-1]) else: palette.value = "" def reset_color_clicked(b): palette.value = "" add_color.on_click(add_color_clicked) del_color.on_click(del_color_clicked) reset_color.on_click(reset_color_clicked) def colormap_changed(change): if change["new"]: n_class = None if classes.value != "Any": n_class = int(classes.value) colors = plt.cm.get_cmap(colormap.value, n_class) cmap_colors = [mpl.colors.rgb2hex(colors(i))[1:] for i in range(colors.N)] _, ax = plt.subplots(figsize=(6, 0.4)) cmap = mpl.colors.LinearSegmentedColormap.from_list( "custom", to_hex_colors(cmap_colors), N=256 ) vmin = 0 vmax = 1 try: if vis_min.value != "": vmin = float(vis_min.value) if vis_max.value != "": vmax = float(vis_max.value) except Exception as _: pass norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax) mpl.colorbar.ColorbarBase( ax, norm=norm, cmap=cmap, orientation="horizontal" ) palette.value = ", ".join(cmap_colors) if m.colorbar_widget is None: m.colorbar_widget = widgets.Output(layout=widgets.Layout(height="60px")) if m.colorbar_ctrl is None: m.colorbar_ctrl = ipyleaflet.WidgetControl( widget=m.colorbar_widget, position="bottomright" ) m.add_control(m.colorbar_ctrl) colorbar_output = m.colorbar_widget with colorbar_output: colorbar_output.clear_output() plt.show() 
colormap.observe(colormap_changed, "value") palette_vbox = widgets.VBox() labels = widgets.Text( value=", ".join([str(i) for i in range(1984, 2021)]), description="Labels:", style=style, layout=widgets.Layout(width="150px", padding=padding), ) speed = widgets.FloatSlider( description="Speed (sec):", tooltip="Time interval in seconds", value=1, min=0.1, max=10, readout=False, style=style, layout=widgets.Layout(width="160px", padding=padding), ) speed_label = widgets.Label( layout=widgets.Layout(width="25px", padding=padding), ) widgets.jslink((speed, "value"), (speed_label, "value")) prebuilt_options = widgets.VBox() cloud = widgets.Checkbox( value=True, description="Apply fmask (remove clouds, shadows, snow)", tooltip="Apply fmask (remove clouds, shadows, snow)", style=style, ) start_year = widgets.IntSlider( description="Start Year:", value=1984, min=1984, max=2021, readout=False, style=style, layout=widgets.Layout(width="138px", padding=padding), ) def year_change(change): if change["new"]: if collection.value != "MOD13A2.006 Terra Vegetation Indices": labels.value = ", ".join( str(i) for i in range(int(start_year.value), int(end_year.value) + 1) ) else: modis_labels = [] for i in range(int(start_year.value), int(end_year.value) + 1): for j in range(1, 13): modis_labels.append(str(i) + "-" + str(j).zfill(2)) labels.value = ", ".join(modis_labels) start_year.observe(year_change, "value") start_year_label = widgets.Label() widgets.jslink((start_year, "value"), (start_year_label, "value")) end_year = widgets.IntSlider( description="End Year:", value=2020, min=1984, max=2021, readout=False, style=style, layout=widgets.Layout(width="138px", padding=padding), ) end_year.observe(year_change, "value") end_year_label = widgets.Label() widgets.jslink((end_year, "value"), (end_year_label, "value")) start_month = widgets.IntSlider( description="Start Month:", value=1, min=1, max=12, readout=False, style=style, layout=widgets.Layout(width="145px", padding=padding), ) 
start_month_label = widgets.Label( layout=widgets.Layout(width="20px", padding=padding), ) widgets.jslink((start_month, "value"), (start_month_label, "value")) end_month = widgets.IntSlider( description="End Month:", value=12, min=1, max=12, readout=False, style=style, layout=widgets.Layout(width="155px", padding=padding), ) end_month_label = widgets.Label() widgets.jslink((end_month, "value"), (end_month_label, "value")) prebuilt_options.children = [ widgets.HBox([start_year, start_year_label, end_year, end_year_label]), widgets.HBox([start_month, start_month_label, end_month, end_month_label]), cloud, ] button_width = "113px" apply_btn = widgets.Button( description="Apply", button_style="primary", tooltip="Apply the settings to activate the time slider", style=style, layout=widgets.Layout(padding="0px", width=button_width), ) def submit_clicked(b): output.clear_output() with output: if start_year.value > end_year.value: print("The end year must be great than the start year.") return if start_month.value > end_month.value: print("The end month must be great than the start month.") return if m is not None: roi = None if region.value == "User-drawn ROI" and (m.user_roi is not None): roi = m.user_roi elif region.value == "User-drawn ROI" and (m.user_roi is None): with output: print("Use the Drawing tool to create an ROI.") return elif region.value in m.ee_layer_dict: roi = m.ee_layer_dict[region.value]["ee_object"] with output: print("Computing... 
Please wait...") layer_labels = None vis_params = {} try: if vis_min.value != "": vis_params["min"] = float(vis_min.value) if vis_max.value != "": vis_params["max"] = float(vis_max.value) vis_params["opacity"] = float(opacity.value) if len(bands_hbox.children) > 0 and ( band1_dropdown.value and band2_dropdown.value and band3_dropdown.value ): vis_params["bands"] = [ band1_dropdown.value, band2_dropdown.value, band3_dropdown.value, ] vis_params["gamma"] = float(gamma.value) if len(palette_vbox.children) > 0: if "," in palette.value: vis_params["palette"] = [ i.strip() for i in palette.value.split(",") ] elif len(palette.value) > 0: vis_params["palette"] = palette.value.strip() except Exception as _: with output: print("The vis parmas are invalid.") return if labels.value != "" and "," in labels.value: try: layer_labels = [i.strip() for i in labels.value.split(",")] except Exception as e: raise ValueError(e) if collection.value in m.ee_raster_layer_names: layer = m.ee_layer_dict[collection.value] ee_object = layer["ee_object"] elif collection.value in col_options_dict: start_date = str(start_month.value).zfill(2) + "-01" end_date = str(end_month.value).zfill(2) + "-30" if collection.value == "Landsat TM-ETM-OLI Surface Reflectance": ee_object = landsat_timeseries( roi, int(start_year.value), int(end_year.value), start_date, end_date, cloud.value, ) elif collection.value == "MOD13A2.006 Terra Vegetation Indices": ee_object = modis_timeseries( roi=roi, start_year=int(start_year.value), end_year=int(end_year.value), start_date=start_date, end_date=end_date, ) elif collection.value == "Sentinel-2 Surface Relectance": ee_object = sentinel2_timeseries( roi, int(start_year.value), int(end_year.value), start_date, end_date, cloud.value, ) elif collection.value == "USDA NAIP Imagery": if int(start_year.value) < 2009 and ( band1_dropdown.value == "N" or band2_dropdown.value == "N" or band3_dropdown.value == "N" ): with output: output.clear_output() print("4-band NAIP imagery 
not available before 2009.") return ee_object = naip_timeseries(roi, start_year.value, end_year.value) m.add_time_slider( ee_object, region=roi, vis_params=vis_params, labels=layer_labels, time_interval=speed.value, ) output.clear_output() if m.colorbar_ctrl is not None: m.remove_control(m.colorbar_ctrl) m.colorbar_ctrl = None apply_btn.on_click(submit_clicked) reset_btn = widgets.Button( description="Reset", button_style="primary", style=style, layout=widgets.Layout(padding="0px", width=button_width), ) def reset_btn_click(change): output.clear_output() collection.value = col_options[0] region.value = "User-drawn ROI" vis.value = "" labels.value = "1, 2, 3" speed.value = 1 if m.colorbar_ctrl is not None: m.remove_control(m.colorbar_ctrl) m.colorbar_ctrl = None reset_btn.on_click(reset_btn_click) close_btn = widgets.Button( description="Close", button_style="primary", style=style, layout=widgets.Layout(padding="0px", width=button_width), ) def close_click(change): if m is not None: m.toolbar_reset() if m.tool_control is not None and m.tool_control in m.controls: m.remove_control(m.tool_control) m.tool_control = None if m.colorbar_ctrl is not None: m.remove_control(m.colorbar_ctrl) m.colorbar_ctrl = None toolbar_widget.close() close_btn.on_click(close_click) def collection_changed(change): if change["new"]: selected = change["owner"].value if selected in m.ee_layer_dict: prebuilt_options.children = [] labels.value = "" region.value = None ee_object = m.ee_layer_dict[selected]["ee_object"] vis_params = m.ee_layer_dict[selected]["vis_params"] if isinstance(ee_object, ee.Image): palette_vbox.children = [ widgets.HBox([classes, colormap]), widgets.HBox( [palette, color_picker, add_color, del_color, reset_color] ), ] bands_hbox.children = [] elif isinstance(ee_object, ee.ImageCollection): first = ee.Image(ee_object.first()) band_names = first.bandNames().getInfo() band_count = len(band_names) if band_count > 2: band1_dropdown.options = band_names band2_dropdown.options = 
band_names band3_dropdown.options = band_names band1_dropdown.value = band_names[2] band2_dropdown.value = band_names[1] band3_dropdown.value = band_names[0] palette_vbox.children = [] bands_hbox.children = [ bands_label, band1_dropdown, band2_dropdown, band3_dropdown, ] else: palette_vbox.children = [ widgets.HBox([classes, colormap]), widgets.HBox( [ palette, color_picker, add_color, del_color, reset_color, ] ), ] bands_hbox.children = [] if "min" in vis_params: vis_min.value = str(vis_params["min"]) if "max" in vis_params: vis_max.value = str(vis_params["max"]) if "opacity" in vis_params: opacity.value = str(vis_params["opacity"]) if "gamma" in vis_params: if isinstance(vis_params["gamma"], list): gamma.value = str(vis_params["gamma"][0]) else: gamma.value = str(vis_params["gamma"]) if "palette" in vis_params: palette.value = ", ".join(vis_params["palette"]) else: prebuilt_options.children = [ widgets.HBox( [start_year, start_year_label, end_year, end_year_label] ), widgets.HBox( [start_month, start_month_label, end_month, end_month_label] ), cloud, ] if selected == "MOD13A2.006 Terra Vegetation Indices": palette_vbox.children = [ widgets.HBox([classes, colormap]), widgets.HBox( [ palette, color_picker, add_color, del_color, reset_color, ] ), ] bands_hbox.children = [] palette.value = ", ".join(col_options_dict[selected]["palette"]) modis_labels = [] for i in range(int(start_year.value), int(end_year.value) + 1): for j in range(1, 13): modis_labels.append(str(i) + "-" + str(j).zfill(2)) labels.value = ", ".join(modis_labels) else: bands_hbox.children = [ bands_label, band1_dropdown, band2_dropdown, band3_dropdown, ] bandnames = col_options_dict[selected]["bandnames"] band1_dropdown.options = bandnames band2_dropdown.options = bandnames band3_dropdown.options = bandnames if ( selected == "Landsat TM-ETM-OLI Surface Reflectance" or selected == "Sentinel-2 Surface Relectance" ): band1_dropdown.value = bandnames[2] band2_dropdown.value = bandnames[1] 
                        band3_dropdown.value = bandnames[0]
                        palette_vbox.children = []
                    elif selected == "USDA NAIP Imagery":
                        band1_dropdown.value = bandnames[0]
                        band2_dropdown.value = bandnames[1]
                        band3_dropdown.value = bandnames[2]
                        palette_vbox.children = []

                    labels.value = ", ".join(
                        str(i)
                        for i in range(int(start_year.value), int(end_year.value) + 1)
                    )

                # Clamp the year sliders to the dataset's coverage and adopt
                # its default stretch.
                start_year.min = col_options_dict[selected]["start_year"]
                start_year.max = col_options_dict[selected]["end_year"]
                start_year.value = start_year.min
                end_year.min = col_options_dict[selected]["start_year"]
                end_year.max = col_options_dict[selected]["end_year"]
                end_year.value = end_year.max
                vis_min.value = str(col_options_dict[selected]["min"])
                vis_max.value = str(col_options_dict[selected]["max"])

                # NOTE(review): string values assigned to IntSliders — relies
                # on the slider's casting trait; confirm.
                if selected == "MOD13A2.006 Terra Vegetation Indices":
                    start_year.value = "2001"
                    end_year.value = "2020"
                elif selected == "USDA NAIP Imagery":
                    start_year.value = "2009"
                    end_year.value = "2019"

    collection.observe(collection_changed, "value")

    output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))

    # Toolbar layout: collapsed (button only) vs expanded (header + form).
    toolbar_widget = widgets.VBox()
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox()
    toolbar_header.children = [close_button, toolbar_button]
    toolbar_footer = widgets.VBox()
    toolbar_footer.children = [
        collection,
        region,
        bands_hbox,
        widgets.HBox([vis_min, vis_max]),
        widgets.HBox([opacity, opacity_label, gamma, gamma_label]),
        palette_vbox,
        widgets.HBox([labels, speed, speed_label]),
        prebuilt_options,
        widgets.HBox([apply_btn, reset_btn, close_btn]),
        output,
    ]

    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )

    def handle_toolbar_event(event):
        # Expand on hover; collapse on mouse-out unless pinned open.
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                close_button.value = False

    toolbar_event.on_dom_event(handle_toolbar_event)

    def toolbar_btn_click(change):
        if change["new"]:
            close_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        else:
            if not close_button.value:
                toolbar_widget.children = [toolbar_button]

    toolbar_button.observe(toolbar_btn_click, "value")

    def close_btn_click(change):
        # Remove the tool (and its colorbar, if any) from the map.
        if change["new"]:
            toolbar_button.value = False
            if m is not None:
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
                m.toolbar_reset()
            toolbar_widget.close()

            if m.colorbar_ctrl is not None:
                m.remove_control(m.colorbar_ctrl)
                m.colorbar_ctrl = None

    close_button.observe(close_btn_click, "value")

    toolbar_button.value = True
    if m is not None:
        toolbar_control = ipyleaflet.WidgetControl(
            widget=toolbar_widget, position="topright"
        )

        if toolbar_control not in m.controls:
            m.add_control(toolbar_control)
            m.tool_control = toolbar_control
    else:
        return toolbar_widget


def plot_transect(m=None):
    """Creates a GUI for plotting a transect profile along a user-drawn line.

    Args:
        m (geemap.Map, optional): A geemap Map instance. Defaults to None.

    Returns:
        ipywidgets: The interactive GUI (only returned when m is None).
    """
    from bqplot import pyplot as plt

    widget_width = "250px"
    padding = "0px 0px 0px 5px"  # upper, right, bottom, left

    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Show or hide the toolbar",
        icon="line-chart",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )

    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )

    # Placeholder options; replaced with the map's raster layers below.
    layer = widgets.Dropdown(
        options=["Option 1", "Option 2", "Option 3"],
        value=None,
        description="Image:",
        layout=widgets.Layout(width=widget_width, padding=padding),
        style={"description_width": "initial"},
    )

    band = widgets.Dropdown(
        options=["Option 1", "Option 2", "Option 3"],
        value=None,
        description="Band:",
        layout=widgets.Layout(width=widget_width, padding=padding),
        style={"description_width": "initial"},
    )

    reducer = widgets.Dropdown(
        options=["mean", "median", "min", "max", "mode", "sum", "stdDev", "variance"],
        value="mean",
        description="Stats:",
        layout=widgets.Layout(width="120px", padding=padding),
        style={"description_width": "initial"},
    )

    segments = widgets.IntText(
        value="100",
        description="Segments:",
        placeholder="Number of segments",
        style={"description_width": "initial"},
        layout=widgets.Layout(width="126px", padding=padding),
    )

    dist_interval = widgets.Text(
        value="",
        description="Distance interval (m):",
        placeholder="Optional",
        style={"description_width": "initial"},
        layout=widgets.Layout(width=widget_width, padding=padding),
    )

    title = widgets.Text(
        value="",
        description="Plot title:",
        placeholder="Plot title",
        style={"description_width": "initial"},
        layout=widgets.Layout(width=widget_width, padding=padding),
    )

    xlabel = widgets.Text(
        value="",
        description="xlabel:",
        placeholder="x-axis",
        style={"description_width": "initial"},
        layout=widgets.Layout(width="123px", padding=padding),
    )

    ylabel = widgets.Text(
        value="",
        description="ylabel:",
        placeholder="y-axis",
        style={"description_width": "initial"},
        layout=widgets.Layout(width="123px", padding=padding),
    )

    buttons = widgets.ToggleButtons(
        value=None,
        options=["Plot", "Reset", "Close"],
        tooltips=["Plot transect", "Reset", "Close"],
        button_style="primary",
    )
    buttons.style.button_width = "80px"

    # The transect chart is rendered into this output control.
    output = widgets.Output(
        layout=widgets.Layout(max_width="500px", max_height="265px", padding=padding)
    )

    toolbar_widget = widgets.VBox()
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox()
    toolbar_header.children = [close_button, toolbar_button]
    toolbar_footer = widgets.VBox()
    toolbar_footer.children = [
        layer,
        band,
        widgets.HBox([reducer, segments]),
        dist_interval,
        title,
        widgets.HBox([xlabel, ylabel]),
        buttons,
    ]

    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )

    if m is not None:
        # Populate the image dropdown from the map's raster layers and seed
        # the band list from the selected layer.
        # NOTE(review): assumes assigning .options leaves layer.value on a
        # valid key for m.ee_layer_dict — confirm the Dropdown trait behavior.
        layer.options = m.ee_raster_layer_names
        if len(layer.options) > 0:
            image = m.ee_layer_dict[layer.value]["ee_object"]
            if isinstance(image, ee.ImageCollection):
                image = image.toBands()
            band.options = image.bandNames().getInfo()

        transect_control = ipyleaflet.WidgetControl(
            widget=output, position="bottomright"
        )
        m.add_control(transect_control)
        m.transect_control = transect_control

    def layer_changed(change):
        # Refresh the band list when a different image layer is selected.
        if change["new"]:
            if m is not None:
                image = m.ee_layer_dict[layer.value]["ee_object"]
                if isinstance(image, ee.ImageCollection):
                    image = image.toBands()
                band.options = image.bandNames().getInfo()

    layer.observe(layer_changed, "value")

    def handle_toolbar_event(event):
        # Expand on hover; collapse on mouse-out unless pinned open.
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                close_button.value = False

    toolbar_event.on_dom_event(handle_toolbar_event)

    def toolbar_btn_click(change):
        if change["new"]:
            close_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        else:
            if not close_button.value:
                toolbar_widget.children = [toolbar_button]

    toolbar_button.observe(toolbar_btn_click, "value")

    def close_btn_click(change):
        # Remove both the toolbar control and the chart output control.
        if change["new"]:
            toolbar_button.value = False
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
                if m.transect_control is not None and m.transect_control in m.controls:
                    m.remove_control(m.transect_control)
                    m.transect_control = None

            toolbar_widget.close()

    close_button.observe(close_btn_click, "value")

    def button_clicked(change):
        # Dispatch the Plot/Reset/Close toggle buttons.
        if change["new"] == "Plot":
            with output:
                output.clear_output()
                if m is not None:
                    if m.user_roi is not None:
                        # The transect geometry must be a drawn LineString.
                        line = m.user_roi
                        geom_type = line.type().getInfo()
                        if geom_type != "LineString":
                            print("Use drawing tool to draw a line")
                        else:
                            image = m.ee_layer_dict[layer.value]["ee_object"]
                            if isinstance(image, ee.ImageCollection):
                                image = image.toBands()
                            image = image.select([band.value])
                            if dist_interval.value == "":
                                dist = None
                            else:
                                dist = float(dist_interval.value)

                            print("Computing ...")
                            df = extract_transect(
                                image,
                                line,
                                reducer.value,
                                int(segments.value),
                                dist,
                                to_pandas=True,
                            )
                            output.clear_output()
                            fig = plt.figure(title=title.value)
                            fig.layout.width = output.layout.max_width
                            fig.layout.height = output.layout.max_height
                            plt.plot(df["distance"], df[reducer.value])
                            plt.xlabel(xlabel.value)
                            plt.ylabel(ylabel.value)
                            plt.show()
                    else:
                        print("Use drawing tool to draw a line")
        elif change["new"] == "Reset":
            output.clear_output()
        elif change["new"] == "Close":
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
                if m.transect_control is not None and m.transect_control in m.controls:
                    m.remove_control(m.transect_control)
                    m.transect_control = None
            toolbar_widget.close()

        # Re-arm the ToggleButtons so the same action can fire again.
        buttons.value = None

    buttons.observe(button_clicked, "value")

    toolbar_button.value = True
    if m is not None:
        toolbar_control = ipyleaflet.WidgetControl(
            widget=toolbar_widget, position="topright"
        )

        if toolbar_control not in m.controls:
            m.add_control(toolbar_control)
            m.tool_control = toolbar_control
    else:
        return toolbar_widget


def sankee_gui(m=None):

    import sankee

    widget_width = "250px"
    padding = "0px 0px 0px 5px"  # upper, right, bottom, left

    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="random",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )

    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )

    region = widgets.Dropdown(
        options=["User-drawn ROI"],
        value="User-drawn ROI",
        description="Region:",
        layout=widgets.Layout(width=widget_width, padding=padding),
        style={"description_width": "initial"},
    )

    def region_changed(change):
        # Add and zoom to the built-in Las Vegas sample polygon on demand.
        if change["new"] == "Las Vegas":
            if m is not None:
                las_vegas = ee.Geometry.Polygon(
                    [
                        [
                            [-115.01184401606046, 36.24170785506492],
                            [-114.98849806879484,
36.29928186470082], [-115.25628981684171, 36.35238941394592], [-115.34692702387296, 36.310348922031565], [-115.37988600824796, 36.160811202271944], [-115.30298171137296, 36.03653336474891], [-115.25628981684171, 36.05207884201088], [-115.26590285395109, 36.226199908103695], [-115.19174513910734, 36.25499793268206], ] ] ) m.addLayer(las_vegas, {}, "Las Vegas") m.centerObject(las_vegas, 10) region.observe(region_changed, "value") dataset = widgets.Dropdown( options=[ "NLCD - National Land Cover Database", "MCD12Q1 - MODIS Global Land Cover", "CGLS - Copernicus Global Land Cover", "LCMS - Land Change Monitoring System", ], value="NLCD - National Land Cover Database", description="Dataset:", layout=widgets.Layout(width=widget_width, padding=padding), style={"description_width": "initial"}, ) NLCD_options = ["2001", "2004", "2006", "2008", "2011", "2013", "2016"] MODIS_options = [str(y) for y in range(2001, 2020)] CGLS_options = [str(y) for y in range(2015, 2020)] LCMS_options = [str(y) for y in range(1985, 2021)] before = widgets.Dropdown( options=NLCD_options, value="2001", description="Before:", layout=widgets.Layout(width="123px", padding=padding), style={"description_width": "initial"}, ) after = widgets.Dropdown( options=NLCD_options, value="2016", description="After:", layout=widgets.Layout(width="123px", padding=padding), style={"description_width": "initial"}, ) def dataset_changed(change): if change["new"] == "NLCD - National Land Cover Database": before.options = NLCD_options after.options = NLCD_options before.value = NLCD_options[0] after.value = NLCD_options[-1] elif change["new"] == "MCD12Q1 - MODIS Global Land Cover": before.options = MODIS_options after.options = MODIS_options before.value = MODIS_options[0] after.value = MODIS_options[-1] elif change["new"] == "CGLS - Copernicus Global Land Cover": before.options = CGLS_options after.options = CGLS_options before.value = CGLS_options[0] after.value = CGLS_options[-1] elif change["new"] == "LCMS - Land 
Change Monitoring System": before.options = LCMS_options after.options = LCMS_options before.value = LCMS_options[0] after.value = LCMS_options[-1] dataset.observe(dataset_changed, "value") dataset_template = { "NLCD - National Land Cover Database": sankee.datasets.NLCD2016, "MCD12Q1 - MODIS Global Land Cover": sankee.datasets.MODIS_LC_TYPE1, "CGLS - Copernicus Global Land Cover": sankee.datasets.CGLS_LC100, "LCMS - Land Change Monitoring System": sankee.datasets.LCMS_LC, } band_name = { "NLCD - National Land Cover Database": "landcover", "MCD12Q1 - MODIS Global Land Cover": "LC_Type1", "CGLS - Copernicus Global Land Cover": "discrete_classification", "LCMS - Land Change Monitoring System": "Land_Cover", } samples = widgets.IntText( value=1000, description="Samples:", placeholder="The number of samples points to randomly generate for characterizing all images", style={"description_width": "initial"}, layout=widgets.Layout(width="133px", padding=padding), ) classes = widgets.IntText( value=6, description="Classes:", style={"description_width": "initial"}, layout=widgets.Layout(width="113px", padding=padding), ) title = widgets.Text( value="Land Cover Change", description="Title:", style={"description_width": "initial"}, layout=widgets.Layout(width=widget_width, padding=padding), ) buttons = widgets.ToggleButtons( value=None, options=["Apply", "Reset", "Close"], tooltips=["Apply", "Reset", "Close"], button_style="primary", ) buttons.style.button_width = "80px" output = widgets.Output(layout=widgets.Layout(padding=padding)) toolbar_widget = widgets.VBox() toolbar_widget.children = [toolbar_button] toolbar_header = widgets.HBox() toolbar_header.children = [close_button, toolbar_button] toolbar_footer = widgets.VBox() toolbar_footer.children = [ region, dataset, widgets.HBox([before, after]), widgets.HBox([samples, classes]), title, buttons, output, ] toolbar_event = ipyevents.Event( source=toolbar_widget, watched_events=["mouseenter", "mouseleave"] ) if m is not None: 
if "Las Vegas" not in m.ee_vector_layer_names: region.options = ["User-drawn ROI", "Las Vegas"] + m.ee_vector_layer_names else: region.options = ["User-drawn ROI"] + m.ee_vector_layer_names plot_close_btn = widgets.Button( tooltip="Close the plot", icon="times", layout=widgets.Layout( height="28px", width="28px", padding="0px 0px 0px 0px" ), ) def plot_close_btn_clicked(b): plot_widget.children = [] plot_close_btn.on_click(plot_close_btn_clicked) plot_reset_btn = widgets.Button( tooltip="Reset the plot", icon="home", layout=widgets.Layout( height="28px", width="28px", padding="0px 0px 0px 0px" ), ) def plot_reset_btn_clicked(b): m.sankee_plot.update_layout( width=600, height=250, margin=dict(l=10, r=10, b=10, t=50, pad=5), ) with plot_output: plot_output.clear_output() display(m.sankee_plot) plot_reset_btn.on_click(plot_reset_btn_clicked) plot_fullscreen_btn = widgets.Button( tooltip="Fullscreen the plot", icon="arrows-alt", layout=widgets.Layout( height="28px", width="28px", padding="0px 0px 0px 0px" ), ) def plot_fullscreen_btn_clicked(b): m.sankee_plot.update_layout( width=1030, height=int(m.layout.height[:-2]) - 60, margin=dict(l=10, r=10, b=10, t=50, pad=5), ) with plot_output: plot_output.clear_output() display(m.sankee_plot) plot_fullscreen_btn.on_click(plot_fullscreen_btn_clicked) width_btn = widgets.Button( tooltip="Change plot width", icon="arrows-h", layout=widgets.Layout( height="28px", width="28px", padding="0px 0px 0px 0px" ), ) def width_btn_clicked(b): m.sankee_plot.update_layout( width=1030, margin=dict(l=10, r=10, b=10, t=50, pad=5), ) with plot_output: plot_output.clear_output() display(m.sankee_plot) width_btn.on_click(width_btn_clicked) height_btn = widgets.Button( tooltip="Change plot height", icon="arrows-v", layout=widgets.Layout( height="28px", width="28px", padding="0px 0px 0px 0px" ), ) def height_btn_clicked(b): m.sankee_plot.update_layout( height=int(m.layout.height[:-2]) - 60, margin=dict(l=10, r=10, b=10, t=50, pad=5), ) with 
plot_output: plot_output.clear_output() display(m.sankee_plot) height_btn.on_click(height_btn_clicked) width_slider = widgets.IntSlider( value=600, min=400, max=1030, step=10, description="", readout=False, continuous_update=False, layout=widgets.Layout(width="100px", padding=padding), style={"description_width": "initial"}, ) width_slider_label = widgets.Label( layout=widgets.Layout(padding="0px 10px 0px 0px") ) widgets.jslink((width_slider, "value"), (width_slider_label, "value")) def width_changed(change): if change["new"]: m.sankee_plot.update_layout( width=width_slider.value, margin=dict(l=10, r=10, b=10, t=50, pad=5), ) with plot_output: plot_output.clear_output() display(m.sankee_plot) width_slider.observe(width_changed, "value") height_slider = widgets.IntSlider( value=250, min=200, max=int(m.layout.height[:-2]) - 60, step=10, description="", readout=False, continuous_update=False, layout=widgets.Layout(width="100px", padding=padding), style={"description_width": "initial"}, ) height_slider_label = widgets.Label() widgets.jslink((height_slider, "value"), (height_slider_label, "value")) def height_changed(change): if change["new"]: m.sankee_plot.update_layout( height=height_slider.value, margin=dict(l=10, r=10, b=10, t=50, pad=5), ) with plot_output: plot_output.clear_output() display(m.sankee_plot) height_slider.observe(height_changed, "value") plot_output = widgets.Output() plot_widget = widgets.VBox([plot_output]) sankee_control = ipyleaflet.WidgetControl( widget=plot_widget, position="bottomright" ) m.add_control(sankee_control) m.sankee_control = sankee_control def handle_toolbar_event(event): if event["type"] == "mouseenter": toolbar_widget.children = [toolbar_header, toolbar_footer] elif event["type"] == "mouseleave": if not toolbar_button.value: toolbar_widget.children = [toolbar_button] toolbar_button.value = False close_button.value = False toolbar_event.on_dom_event(handle_toolbar_event) def toolbar_btn_click(change): if change["new"]: 
close_button.value = False toolbar_widget.children = [toolbar_header, toolbar_footer] else: if not close_button.value: toolbar_widget.children = [toolbar_button] toolbar_button.observe(toolbar_btn_click, "value") def close_btn_click(change): if change["new"]: toolbar_button.value = False if m is not None: m.toolbar_reset() if m.tool_control is not None and m.tool_control in m.controls: m.remove_control(m.tool_control) m.tool_control = None if m.sankee_control is not None and m.sankee_control in m.controls: m.remove_control(m.sankee_control) m.sankee_control = None toolbar_widget.close() close_button.observe(close_btn_click, "value") def button_clicked(change): if change["new"] == "Apply": with output: output.clear_output() plot_output.clear_output() print("Running ...") if m is not None: exclude_classes = [] if "NLCD" in dataset.value: before_img = ee.Image(f"USGS/NLCD/NLCD{before.value}") after_img = ee.Image(f"USGS/NLCD/NLCD{after.value}") vis_params = {} elif "MODIS" in dataset.value: before_img = ee.Image(f"MODIS/006/MCD12Q1/{before.value}_01_01") after_img = ee.Image(f"MODIS/006/MCD12Q1/{after.value}_01_01") vis_params = { "min": 1.0, "max": 17.0, "palette": [ "05450a", "086a10", "54a708", "78d203", "009900", "c6b044", "dcd159", "dade48", "fbff13", "b6ff05", "27ff87", "c24f44", "a5a5a5", "ff6d4c", "69fff8", "f9ffa4", "1c0dff", ], } elif "CGLS" in dataset.value: before_img = ee.Image( f"COPERNICUS/Landcover/100m/Proba-V-C3/Global/{before.value}" ) after_img = ee.Image( f"COPERNICUS/Landcover/100m/Proba-V-C3/Global/{after.value}" ) vis_params = {} elif "LCMS" in dataset.value: before_img = ee.Image( f"USFS/GTAC/LCMS/v2020-5/LCMS_CONUS_v2020-5_{before.value}" ) after_img = ee.Image( f"USFS/GTAC/LCMS/v2020-5/LCMS_CONUS_v2020-5_{after.value}" ) vis_params = {} # LCMS Land Cover class 15 is a no data mask and should be excluded exclude_classes.append(15) img_list = [before_img, after_img] label_list = [before.value, after.value] image1 = 
before_img.select(band_name[dataset.value]) image2 = after_img.select(band_name[dataset.value]) if region.value != "User-drawn ROI" or ( region.value == "User-drawn ROI" and m.user_roi is not None ): if region.value == "User-drawn ROI": geom = m.user_roi image1 = image1.clip(geom) image2 = image2.clip(geom) else: roi_object = m.ee_layer_dict[region.value]["ee_object"] if region.value == "Las Vegas": m.centerObject(roi_object, 10) if isinstance(roi_object, ee.Geometry): geom = roi_object image1 = image1.clip(geom) image2 = image2.clip(geom) else: roi_object = ee.FeatureCollection(roi_object) image1 = image1.clipToCollection(roi_object) image2 = image2.clipToCollection(roi_object) geom = roi_object.geometry() if len(title.value) > 0: plot_title = title.value else: plot_title = None m.default_style = {"cursor": "wait"} plot = sankee.sankify( img_list, geom, label_list, dataset_template[dataset.value], max_classes=classes.value, n=int(samples.value), title=plot_title, exclude=exclude_classes, ) output.clear_output() plot_output.clear_output() with plot_output: plot.update_layout( width=600, height=250, margin=dict(l=10, r=10, b=10, t=50, pad=5), ) plot_widget.children = [ widgets.HBox( [ plot_close_btn, plot_reset_btn, plot_fullscreen_btn, width_btn, width_slider, width_slider_label, height_btn, height_slider, height_slider_label, ] ), plot_output, ] display(plot) m.sankee_plot = plot m.addLayer(image1, vis_params, before.value) m.addLayer(image2, vis_params, after.value) m.default_style = {"cursor": "default"} else: with output: output.clear_output() print("Draw a polygon on the map.") elif change["new"] == "Reset": output.clear_output() plot_output.clear_output() plot_widget.children = [] elif change["new"] == "Close": if m is not None: m.toolbar_reset() if m.tool_control is not None and m.tool_control in m.controls: m.remove_control(m.tool_control) m.tool_control = None if m.sankee_control is not None and m.sankee_control in m.controls: 
m.remove_control(m.sankee_control) m.sankee_control = None toolbar_widget.close() buttons.value = None buttons.observe(button_clicked, "value") toolbar_button.value = True if m is not None: toolbar_control = ipyleaflet.WidgetControl( widget=toolbar_widget, position="topright" ) if toolbar_control not in m.controls: m.add_control(toolbar_control) m.tool_control = toolbar_control else: return toolbar_widget def split_basemaps( m, layers_dict=None, left_name=None, right_name=None, width="120px", **kwargs ): from .basemaps import basemap_tiles controls = m.controls layers = m.layers m.layers = [m.layers[0]] m.clear_controls() add_zoom = True add_fullscreen = True if layers_dict is None: layers_dict = {} keys = dict(basemap_tiles).keys() for key in keys: if isinstance(basemap_tiles[key], ipyleaflet.WMSLayer): pass else: layers_dict[key] = basemap_tiles[key] keys = list(layers_dict.keys()) if left_name is None: left_name = keys[0] if right_name is None: right_name = keys[-1] left_layer = layers_dict[left_name] right_layer = layers_dict[right_name] control = ipyleaflet.SplitMapControl(left_layer=left_layer, right_layer=right_layer) m.add_control(control) left_dropdown = widgets.Dropdown( options=keys, value=left_name, layout=widgets.Layout(width=width) ) left_control = ipyleaflet.WidgetControl(widget=left_dropdown, position="topleft") m.add_control(left_control) right_dropdown = widgets.Dropdown( options=keys, value=right_name, layout=widgets.Layout(width=width) ) right_control = ipyleaflet.WidgetControl(widget=right_dropdown, position="topright") m.add_control(right_control) close_button = widgets.ToggleButton( value=False, tooltip="Close the tool", icon="times", # button_style="primary", layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"), ) def close_btn_click(change): if change["new"]: m.controls = controls m.layers = layers close_button.observe(close_btn_click, "value") close_control = ipyleaflet.WidgetControl( widget=close_button, 
position="bottomright" ) m.add_control(close_control) if add_zoom: m.add_control(ipyleaflet.ZoomControl()) if add_fullscreen: m.add_control(ipyleaflet.FullScreenControl()) m.add_control(ipyleaflet.ScaleControl(position="bottomleft")) split_control = None for ctrl in m.controls: if isinstance(ctrl, ipyleaflet.SplitMapControl): split_control = ctrl break def left_change(change): split_control.left_layer.url = layers_dict[left_dropdown.value].url left_dropdown.observe(left_change, "value") def right_change(change): split_control.right_layer.url = layers_dict[right_dropdown.value].url right_dropdown.observe(right_change, "value")
"""Module for dealing with the toolbar. """ import os import ee import ipyevents import ipyleaflet import ipywidgets as widgets from ipyfilechooser import FileChooser from IPython.core.display import display from .common import * from .timelapse import * def tool_template(m=None): widget_width = "250px" padding = "0px 0px 0px 5px" # upper, right, bottom, left toolbar_button = widgets.ToggleButton( value=False, tooltip="Toolbar", icon="gear", layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"), ) close_button = widgets.ToggleButton( value=False, tooltip="Close the tool", icon="times", button_style="primary", layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"), ) checkbox = widgets.Checkbox( description="Checkbox", indent=False, layout=widgets.Layout(padding=padding, width=widget_width), ) dropdown = widgets.Dropdown( options=["Option 1", "Option 2", "Option 3"], value=None, description="Dropdown:", layout=widgets.Layout(width=widget_width, padding=padding), style={"description_width": "initial"}, ) int_slider = widgets.IntSlider( min=1, max=100, description="Int Slider: ", readout=False, continuous_update=True, layout=widgets.Layout(width="220px", padding=padding), style={"description_width": "initial"}, ) int_slider_label = widgets.Label() widgets.jslink((int_slider, "value"), (int_slider_label, "value")) float_slider = widgets.FloatSlider( min=1, max=100, description="Float Slider: ", readout=False, continuous_update=True, layout=widgets.Layout(width="220px", padding=padding), style={"description_width": "initial"}, ) float_slider_label = widgets.Label() widgets.jslink((float_slider, "value"), (float_slider_label, "value")) color = widgets.ColorPicker( concise=False, description="Color:", value="white", style={"description_width": "initial"}, layout=widgets.Layout(width=widget_width, padding=padding), ) text = widgets.Text( value="", description="Textbox:", placeholder="Placeholder", style={"description_width": 
"initial"}, layout=widgets.Layout(width=widget_width, padding=padding), ) textarea = widgets.Textarea( placeholder="Placeholder", layout=widgets.Layout(width=widget_width), ) buttons = widgets.ToggleButtons( value=None, options=["Apply", "Reset", "Close"], tooltips=["Apply", "Reset", "Close"], button_style="primary", ) buttons.style.button_width = "80px" output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding)) toolbar_widget = widgets.VBox() toolbar_widget.children = [toolbar_button] toolbar_header = widgets.HBox() toolbar_header.children = [close_button, toolbar_button] toolbar_footer = widgets.VBox() toolbar_footer.children = [ checkbox, widgets.HBox([int_slider, int_slider_label]), widgets.HBox([float_slider, float_slider_label]), dropdown, text, color, textarea, buttons, output, ] toolbar_event = ipyevents.Event( source=toolbar_widget, watched_events=["mouseenter", "mouseleave"] ) def handle_toolbar_event(event): if event["type"] == "mouseenter": toolbar_widget.children = [toolbar_header, toolbar_footer] elif event["type"] == "mouseleave": if not toolbar_button.value: toolbar_widget.children = [toolbar_button] toolbar_button.value = False close_button.value = False toolbar_event.on_dom_event(handle_toolbar_event) def toolbar_btn_click(change): if change["new"]: close_button.value = False toolbar_widget.children = [toolbar_header, toolbar_footer] else: if not close_button.value: toolbar_widget.children = [toolbar_button] toolbar_button.observe(toolbar_btn_click, "value") def close_btn_click(change): if change["new"]: toolbar_button.value = False if m is not None: m.toolbar_reset() if m.tool_control is not None and m.tool_control in m.controls: m.remove_control(m.tool_control) m.tool_control = None toolbar_widget.close() close_button.observe(close_btn_click, "value") def button_clicked(change): if change["new"] == "Apply": with output: output.clear_output() print("Running ...") elif change["new"] == "Reset": textarea.value = "" 
output.clear_output() elif change["new"] == "Close": if m is not None: m.toolbar_reset() if m.tool_control is not None and m.tool_control in m.controls: m.remove_control(m.tool_control) m.tool_control = None toolbar_widget.close() buttons.value = None buttons.observe(button_clicked, "value") toolbar_button.value = True if m is not None: toolbar_control = ipyleaflet.WidgetControl( widget=toolbar_widget, position="topright" ) if toolbar_control not in m.controls: m.add_control(toolbar_control) m.tool_control = toolbar_control else: return toolbar_widget def open_data_widget(m): """A widget for opening local vector/raster data. Args: m (object): geemap.Map """ padding = "0px 0px 0px 5px" style = {"description_width": "initial"} tool_output = widgets.Output() tool_output_ctrl = ipyleaflet.WidgetControl(widget=tool_output, position="topright") if m.tool_output_ctrl is not None and m.tool_output_ctrl in m.controls: m.remove_control(m.tool_output_ctrl) file_type = widgets.ToggleButtons( options=["Shapefile", "GeoJSON", "CSV", "Vector", "Raster"], tooltips=[ "Open a shapefile", "Open a GeoJSON file", "Open a vector dataset", "Create points from CSV", "Open a vector dataset", "Open a raster dataset", ], ) file_type.style.button_width = "88px" filepath = widgets.Text( value="", description="File path or http URL:", tooltip="Enter a file path or http URL to vector data", style=style, layout=widgets.Layout(width="454px", padding=padding), ) http_widget = widgets.HBox() file_chooser = FileChooser( os.getcwd(), sandbox_path=m.sandbox_path, layout=widgets.Layout(width="454px") ) file_chooser.filter_pattern = "*.shp" file_chooser.use_dir_icons = True style = {"description_width": "initial"} layer_name = widgets.Text( value="Shapefile", description="Enter a layer name:", tooltip="Enter a layer name for the selected file", style=style, layout=widgets.Layout(width="454px", padding="0px 0px 0px 5px"), ) longitude = widgets.Dropdown( options=[], value=None, description="Longitude:", 
        layout=widgets.Layout(width="149px", padding="0px 0px 0px 5px"),
        style={"description_width": "initial"},
    )

    latitude = widgets.Dropdown(
        options=[],
        value=None,
        description="Latitude:",
        layout=widgets.Layout(width="149px", padding="0px 0px 0px 5px"),
        style={"description_width": "initial"},
    )

    label = widgets.Dropdown(
        options=[],
        value=None,
        description="Label:",
        layout=widgets.Layout(width="149px", padding="0px 0px 0px 5px"),
        style={"description_width": "initial"},
    )

    csv_widget = widgets.HBox()

    convert_bool = widgets.Checkbox(
        description="Convert to ee.FeatureCollection?",
        indent=False,
        layout=widgets.Layout(padding="0px 0px 0px 5px"),
    )
    convert_hbox = widgets.HBox([convert_bool])

    ok_cancel = widgets.ToggleButtons(
        value=None,
        options=["Apply", "Reset", "Close"],
        tooltips=["Apply", "Reset", "Close"],
        button_style="primary",
    )
    # ok_cancel.style.button_width = "133px"

    # Raster visualization parameters (shown only in Raster mode).
    bands = widgets.Text(
        value=None,
        description="Band:",
        tooltip="Enter a list of band indices",
        style=style,
        layout=widgets.Layout(width="150px", padding=padding),
    )

    vmin = widgets.Text(
        value=None,
        description="vmin:",
        tooltip="Minimum value of the raster to visualize",
        style=style,
        layout=widgets.Layout(width="148px"),
    )

    vmax = widgets.Text(
        value=None,
        description="vmax:",
        tooltip="Maximum value of the raster to visualize",
        style=style,
        layout=widgets.Layout(width="148px"),
    )

    nodata = widgets.Text(
        value=None,
        description="Nodata:",
        tooltip="Nodata the raster to visualize",
        style=style,
        layout=widgets.Layout(width="150px", padding=padding),
    )

    palette = widgets.Dropdown(
        options=[],
        value=None,
        description="palette:",
        layout=widgets.Layout(width="300px"),
        style=style,
    )

    raster_options = widgets.VBox()

    main_widget = widgets.VBox(
        [
            file_type,
            file_chooser,
            http_widget,
            csv_widget,
            layer_name,
            convert_hbox,
            raster_options,
            ok_cancel,
        ]
    )

    tool_output.clear_output()
    with tool_output:
        display(main_widget)

    def bands_changed(change):
        # A palette only applies to single-band rendering; disable it when a
        # comma-separated (multi-band) list is entered.
        if change["new"] and "," in change["owner"].value:
            palette.value = None
            palette.disabled = True
        else:
            palette.disabled = False

    bands.observe(bands_changed, "value")

    def chooser_callback(chooser):
        # Mirror the chosen path into the text box; for CSV, pre-populate the
        # column dropdowns and auto-select common column names.
        filepath.value = file_chooser.selected
        if file_type.value == "CSV":
            import pandas as pd

            df = pd.read_csv(filepath.value)
            col_names = df.columns.values.tolist()
            longitude.options = col_names
            latitude.options = col_names
            label.options = col_names
            if "longitude" in col_names:
                longitude.value = "longitude"
            if "latitude" in col_names:
                latitude.value = "latitude"
            if "name" in col_names:
                label.value = "name"

    file_chooser.register_callback(chooser_callback)

    def file_type_changed(change):
        # Reset state and reconfigure the visible sub-widgets for the newly
        # selected input kind.
        ok_cancel.value = None
        file_chooser.default_path = os.getcwd()
        file_chooser.reset()
        layer_name.value = file_type.value
        csv_widget.children = []
        filepath.value = ""

        if change["new"] == "Shapefile":
            file_chooser.filter_pattern = "*.shp"
            raster_options.children = []
            convert_hbox.children = [convert_bool]
            http_widget.children = []
        elif change["new"] == "GeoJSON":
            file_chooser.filter_pattern = "*.geojson"
            raster_options.children = []
            convert_hbox.children = [convert_bool]
            http_widget.children = [filepath]
        elif change["new"] == "Vector":
            file_chooser.filter_pattern = "*.*"
            raster_options.children = []
            convert_hbox.children = [convert_bool]
            http_widget.children = [filepath]
        elif change["new"] == "CSV":
            file_chooser.filter_pattern = ["*.csv", "*.CSV"]
            csv_widget.children = [longitude, latitude, label]
            raster_options.children = []
            convert_hbox.children = [convert_bool]
            http_widget.children = [filepath]
        elif change["new"] == "Raster":
            file_chooser.filter_pattern = ["*.tif", "*.img"]
            palette.options = get_palettable(types=["matplotlib", "cartocolors"])
            palette.value = None
            raster_options.children = [
                widgets.HBox([bands, vmin, vmax]),
                widgets.HBox([nodata, palette]),
            ]
            convert_hbox.children = []
            http_widget.children = [filepath]

    def ok_cancel_clicked(change):
        if change["new"] == "Apply":
            m.default_style = {"cursor": "wait"}
            file_path = filepath.value

            if file_path is not None:
                # Dispatch on file extension; each branch either converts to
                # an EE object or adds the file directly as a map layer.
                ext = os.path.splitext(file_path)[1]
                with tool_output:
                    if ext.lower() == ".shp":
                        if convert_bool.value:
                            ee_object = shp_to_ee(file_path)
                            m.addLayer(ee_object, {}, layer_name.value)
                        else:
                            m.add_shapefile(
                                file_path, style={}, layer_name=layer_name.value
                            )
                    elif ext.lower() == ".geojson":
                        if convert_bool.value:
                            ee_object = geojson_to_ee(file_path)
                            m.addLayer(ee_object, {}, layer_name.value)
                        else:
                            m.add_geojson(
                                file_path, style={}, layer_name=layer_name.value
                            )
                    elif ext.lower() == ".csv":
                        if convert_bool.value:
                            ee_object = csv_to_ee(
                                file_path, latitude.value, longitude.value
                            )
                            m.addLayer(ee_object, {}, layer_name.value)
                        else:
                            m.add_xy_data(
                                file_path,
                                x=longitude.value,
                                y=latitude.value,
                                label=label.value,
                                layer_name=layer_name.value,
                            )
                    # NOTE(review): "img" here lacks the leading dot that
                    # ".tif" has — an ".img" file may fall through to the
                    # vector branch; confirm intended behavior.
                    elif ext.lower() in [".tif", "img"] and file_type.value == "Raster":
                        band = None
                        vis_min = None
                        vis_max = None
                        vis_nodata = None

                        # Best-effort parse of the optional vis parameters;
                        # invalid input silently falls back to defaults.
                        try:
                            if len(bands.value) > 0:
                                band = int(bands.value)
                            if len(vmin.value) > 0:
                                vis_min = float(vmin.value)
                            if len(vmax.value) > 0:
                                vis_max = float(vmax.value)
                            if len(nodata.value) > 0:
                                vis_nodata = float(nodata.value)
                        except:
                            pass

                        m.add_local_tile(
                            file_path,
                            layer_name=layer_name.value,
                            band=band,
                            palette=palette.value,
                            vmin=vis_min,
                            vmax=vis_max,
                            nodata=vis_nodata,
                        )
                    else:
                        m.add_vector(file_path, style={}, layer_name=layer_name.value)
            else:
                print("Please select a file to open.")

            m.toolbar_reset()
            m.default_style = {"cursor": "default"}

        elif change["new"] == "Reset":
            file_chooser.reset()
            tool_output.clear_output()
            with tool_output:
                display(main_widget)
            m.toolbar_reset()
        elif change["new"] == "Close":
            if m.tool_output_ctrl is not None and m.tool_output_ctrl in m.controls:
                m.remove_control(m.tool_output_ctrl)
                m.tool_output_ctrl = None
            m.toolbar_reset()

        ok_cancel.value = None

    file_type.observe(file_type_changed, names="value")
    ok_cancel.observe(ok_cancel_clicked, names="value")
    # file_chooser.register_callback(chooser_callback)

    m.add_control(tool_output_ctrl)
    m.tool_output_ctrl = tool_output_ctrl


def \
change_basemap(m):
    """Widget for changing basemaps.

    Adds a dropdown of available basemaps; selecting one swaps the map's
    current basemap layer in place.

    Args:
        m (object): geemap.Map()
    """
    from .basemaps import _ee_basemaps

    dropdown = widgets.Dropdown(
        options=list(_ee_basemaps.keys()),
        value="ROADMAP",
        layout=widgets.Layout(width="200px")
        # description="Basemaps",
    )

    close_btn = widgets.Button(
        icon="times",
        tooltip="Close the basemap widget",
        button_style="primary",
        layout=widgets.Layout(width="32px"),
    )

    basemap_widget = widgets.HBox([dropdown, close_btn])

    def on_click(change):
        basemap_name = change["new"]

        # The basemap sits at index 0 when it is the only layer, otherwise
        # at index 1.  # NOTE(review): presumably index 0 is a base layer in
        # multi-layer maps too — confirm why index 1 is replaced here.
        if len(m.layers) == 1:
            old_basemap = m.layers[0]
        else:
            old_basemap = m.layers[1]
        m.substitute_layer(old_basemap, _ee_basemaps[basemap_name])

    dropdown.observe(on_click, "value")

    def close_click(change):
        m.toolbar_reset()
        if m.basemap_ctrl is not None and m.basemap_ctrl in m.controls:
            m.remove_control(m.basemap_ctrl)
        basemap_widget.close()

    close_btn.on_click(close_click)

    basemap_control = ipyleaflet.WidgetControl(
        widget=basemap_widget, position="topright"
    )
    m.add_control(basemap_control)
    m.basemap_ctrl = basemap_control


def convert_js2py(m):
    """A widget for converting Earth Engine JavaScript to Python.

    Args:
        m (object): geemap.Map
    """
    full_widget = widgets.VBox(layout=widgets.Layout(width="465px", height="350px"))

    text_widget = widgets.Textarea(
        placeholder="Paste your Earth Engine JavaScript into this textbox and click the Convert button below to convert the Javascript to Python",
        layout=widgets.Layout(width="455px", height="310px"),
    )

    buttons = widgets.ToggleButtons(
        value=None,
        options=["Convert", "Clear", "Close"],
        tooltips=["Convert", "Clear", "Close"],
        button_style="primary",
    )
    buttons.style.button_width = "142px"

    def button_clicked(change):
        if change["new"] == "Convert":
            from .conversion import create_new_cell, js_snippet_to_py

            if len(text_widget.value) > 0:
                out_lines = js_snippet_to_py(
                    text_widget.value,
                    add_new_cell=False,
                    import_ee=False,
                    import_geemap=False,
                    show_map=False,
                )
                # Drop a leading blank line from the converter output.
                if len(out_lines) > 0 and len(out_lines[0].strip()) == 0:
                    out_lines = out_lines[1:]
                text_widget.value = "".join(out_lines)
                # NOTE(review): create_new_cell is imported above but
                # create_code_cell is what gets called — confirm which is
                # intended.
                create_code_cell(text_widget.value)

        elif change["new"] == "Clear":
            text_widget.value = ""
        elif change["new"] == "Close":
            m.toolbar_reset()
            if m.convert_ctrl is not None and m.convert_ctrl in m.controls:
                m.remove_control(m.convert_ctrl)
            full_widget.close()
        buttons.value = None

    buttons.observe(button_clicked, "value")

    full_widget.children = [text_widget, buttons]
    widget_control = ipyleaflet.WidgetControl(widget=full_widget, position="topright")
    m.add_control(widget_control)
    m.convert_ctrl = widget_control


def collect_samples(m):
    """GUI for collecting labeled training samples by drawing on the map.

    Lets the user attach up to two properties (one required integer, one
    optional string) and a color to features drawn with the map's draw
    control; drawn features accumulate in ``m.draw_features`` /
    ``m.user_rois``.

    Args:
        m (object): The map to collect samples on.
    """
    full_widget = widgets.VBox()
    layout = widgets.Layout(width="100px")

    prop_label = widgets.Label(
        value="Property",
        layout=widgets.Layout(display="flex", justify_content="center", width="100px"),
    )
    value_label = widgets.Label(
        value="Value",
        layout=widgets.Layout(display="flex", justify_content="center", width="100px"),
    )
    color_label = widgets.Label(
        value="Color",
        layout=widgets.Layout(display="flex", justify_content="center", width="100px"),
    )

    prop_text1 = widgets.Text(layout=layout, placeholder="Required")
    value_text1 = \
widgets.Text(layout=layout, placeholder="Integer")
    prop_text2 = widgets.Text(layout=layout, placeholder="Optional")
    value_text2 = widgets.Text(layout=layout, placeholder="String")

    color = widgets.ColorPicker(
        concise=False,
        value="#3388ff",
        layout=layout,
        style={"description_width": "initial"},
    )

    buttons = widgets.ToggleButtons(
        value=None,
        options=["Apply", "Clear", "Close"],
        tooltips=["Apply", "Clear", "Close"],
        button_style="primary",
    )
    buttons.style.button_width = "99px"

    def button_clicked(change):
        if change["new"] == "Apply":
            # Fall back to the default color when the picker value is not a
            # 7-character "#rrggbb" string.
            if len(color.value) != 7:
                color.value = "#3388ff"
            draw_control = ipyleaflet.DrawControl(
                marker={"shapeOptions": {"color": color.value}, "repeatMode": True},
                rectangle={"shapeOptions": {"color": color.value}, "repeatMode": True},
                polygon={"shapeOptions": {"color": color.value}, "repeatMode": True},
                circlemarker={},
                polyline={},
                edit=False,
                remove=False,
            )

            # Swap the map's existing DrawControl for the new colored one,
            # preserving the order of all other controls.
            controls = []
            old_draw_control = None
            for control in m.controls:
                if isinstance(control, ipyleaflet.DrawControl):
                    controls.append(draw_control)
                    old_draw_control = control
                else:
                    controls.append(control)

            m.controls = tuple(controls)
            old_draw_control.close()
            m.draw_control = draw_control

            train_props = {}

            if prop_text1.value != "" and value_text1.value != "":
                # The first property value must be an integer; re-prompt
                # otherwise.
                try:
                    _ = int(value_text1.value)
                except Exception as _:
                    value_text1.placeholder = "Integer only"
                    value_text1.value = ""
                    return
                train_props[prop_text1.value] = int(value_text1.value)
            if prop_text2.value != "" and value_text2.value != "":
                train_props[prop_text2.value] = value_text2.value
            if color.value != "":
                train_props["color"] = color.value

            # Handles draw events
            def handle_draw(target, action, geo_json):
                from .geemap import ee_tile_layer

                try:
                    geom = geojson_to_ee(geo_json, False)
                    m.user_roi = geom
                    if len(train_props) > 0:
                        feature = ee.Feature(geom, train_props)
                    else:
                        feature = ee.Feature(geom)
                    m.draw_last_json = geo_json
                    m.draw_last_feature = feature
                    if action == "deleted" and len(m.draw_features) > 0:
                        m.draw_features.remove(feature)
                        m.draw_count -= 1
                    else:
                        m.draw_features.append(feature)
                        m.draw_count += 1
                    # Re-render the accumulated features as a single layer.
                    collection = ee.FeatureCollection(m.draw_features)
                    m.user_rois = collection
                    ee_draw_layer = ee_tile_layer(
                        collection, {"color": "blue"}, "Drawn Features", False, 0.5
                    )
                    draw_layer_index = m.find_layer_index("Drawn Features")

                    if draw_layer_index == -1:
                        m.add_layer(ee_draw_layer)
                        m.draw_layer = ee_draw_layer
                    else:
                        m.substitute_layer(m.draw_layer, ee_draw_layer)
                        m.draw_layer = ee_draw_layer

                except Exception as e:
                    # On failure, reset all drawing state before re-raising.
                    m.draw_count = 0
                    m.draw_features = []
                    m.draw_last_feature = None
                    m.draw_layer = None
                    m.user_roi = None
                    m.roi_start = False
                    m.roi_end = False
                    print("There was an error creating Earth Engine Feature.")
                    raise Exception(e)

            draw_control.on_draw(handle_draw)

        elif change["new"] == "Clear":
            prop_text1.value = ""
            value_text1.value = ""
            prop_text2.value = ""
            value_text2.value = ""
            color.value = "#3388ff"
        elif change["new"] == "Close":
            m.toolbar_reset()
            if m.training_ctrl is not None and m.training_ctrl in m.controls:
                m.remove_control(m.training_ctrl)
            full_widget.close()
        buttons.value = None

    buttons.observe(button_clicked, "value")

    full_widget.children = [
        widgets.HBox([prop_label, value_label, color_label]),
        widgets.HBox([prop_text1, value_text1, color]),
        widgets.HBox([prop_text2, value_text2, color]),
        buttons,
    ]

    widget_control = ipyleaflet.WidgetControl(widget=full_widget, position="topright")
    m.add_control(widget_control)
    m.training_ctrl = widget_control


def get_tools_dict():
    """Load the toolbox description CSV shipped with the package.

    Returns:
        dict: Mapping of tool index -> row dict from
        ``data/template/toolbox.csv``.
    """
    import pandas as pd
    import pkg_resources

    pkg_dir = os.path.dirname(pkg_resources.resource_filename("geemap", "geemap.py"))
    toolbox_csv = os.path.join(pkg_dir, "data/template/toolbox.csv")

    df = pd.read_csv(toolbox_csv).set_index("index")
    tools_dict = df.to_dict("index")

    return tools_dict


def tool_gui(tool_dict, max_width="420px", max_height="600px"):
    """Create a GUI for a tool based on the tool dictionary.

    Args:
        tool_dict (dict): The dictionary containing the tool info.
        max_width (str, optional): The max width of the tool dialog.
        max_height (str, optional): The max height of the tool dialog.

    Returns:
        object: An ipywidget object representing the tool interface.
    """
    tool_widget = widgets.VBox(
        layout=widgets.Layout(max_width=max_width, max_height=max_height)
    )
    children = []
    # args maps parameter-name -> input widget (FileChooser/Text/Checkbox);
    # required_inputs lists the parameter names that must be filled before Run.
    args = {}
    required_inputs = []
    style = {"description_width": "initial"}
    # Shrink the inner widgets slightly so they fit inside the dialog frame.
    max_width = str(int(max_width.replace("px", "")) - 10) + "px"
    header_width = str(int(max_width.replace("px", "")) - 104) + "px"

    # Header row: tool name label + "View Code" button.
    header = widgets.Label(
        value=f'Current Tool: {tool_dict["label"]}',
        style=style,
        layout=widgets.Layout(width=header_width),
    )
    code_btn = widgets.Button(
        description="View Code", layout=widgets.Layout(width="100px")
    )

    children.append(widgets.HBox([header, code_btn]))

    # Read-only description of the tool, taken from the tool dictionary.
    desc = widgets.Textarea(
        value=f'Description: {tool_dict["description"]}',
        layout=widgets.Layout(width="410px", max_width=max_width),
        disabled=True,
    )
    children.append(desc)

    run_btn = widgets.Button(description="Run", layout=widgets.Layout(width="100px"))
    cancel_btn = widgets.Button(
        description="Cancel", layout=widgets.Layout(width="100px")
    )
    help_btn = widgets.Button(description="Help", layout=widgets.Layout(width="100px"))
    import_btn = widgets.Button(
        description="Import",
        tooltip="Import the script to a new cell",
        layout=widgets.Layout(width="98px"),
    )
    tool_output = widgets.Output(layout=widgets.Layout(max_height="200px"))
    children.append(widgets.HBox([run_btn, cancel_btn, help_btn, import_btn]))
    children.append(tool_output)
    tool_widget.children = children

    def run_button_clicked(b):
        # Collect "--name=value" style arguments from the input widgets and,
        # when every required parameter is filled, run the tool.
        tool_output.clear_output()

        required_params = required_inputs.copy()
        args2 = []
        for arg in args:
            line = ""
            if isinstance(args[arg], FileChooser):
                if arg in required_params and args[arg].selected is None:
                    with tool_output:
                        print(f"Please provide inputs for required parameters.")
                    # break aborts argument collection; required_params stays
                    # non-empty, so the run below is skipped.
                    break
                elif arg in required_params:
                    required_params.remove(arg)
                if arg == "i":
                    # "i" (input) uses the short single-dash flag form.
                    line = f"-{arg}={args[arg].selected}"
                else:
                    line = f"--{arg}={args[arg].selected}"
            elif isinstance(args[arg], widgets.Text):
                if arg in required_params and len(args[arg].value) == 0:
                    with tool_output:
                        print(f"Please provide inputs for required parameters.")
                    break
                elif arg in required_params:
                    required_params.remove(arg)
                if args[arg].value is not None and len(args[arg].value) > 0:
                    line = f"--{arg}={args[arg].value}"
            elif isinstance(args[arg], widgets.Checkbox):
                line = f"--{arg}={args[arg].value}"
            args2.append(line)

        # Run only when all required parameters were provided.
        if len(required_params) == 0:
            with tool_output:
                # wbt.run_tool(tool_dict["name"], args2)
                pass

    def help_button_clicked(b):
        # Show the tool's documentation link and open it in the browser.
        import webbrowser

        tool_output.clear_output()
        with tool_output:
            html = widgets.HTML(
                value=f'<a href={tool_dict["link"]} target="_blank">{tool_dict["link"]}</a>'
            )
            display(html)
        webbrowser.open_new_tab(tool_dict["link"])

    def code_button_clicked(b):
        # Same link as Help but without clearing previous output first.
        import webbrowser

        with tool_output:
            html = widgets.HTML(
                value=f'<a href={tool_dict["link"]} target="_blank">{tool_dict["link"]}</a>'
            )
            display(html)
        webbrowser.open_new_tab(tool_dict["link"])

    def cancel_btn_clicked(b):
        tool_output.clear_output()

    def import_button_clicked(b):
        # NOTE(review): content is always empty here, so an empty code cell is
        # created — presumably a placeholder for generating the tool script.
        tool_output.clear_output()

        content = []

        create_code_cell("\n".join(content))

    import_btn.on_click(import_button_clicked)
    run_btn.on_click(run_button_clicked)
    help_btn.on_click(help_button_clicked)
    code_btn.on_click(code_button_clicked)
    cancel_btn.on_click(cancel_btn_clicked)

    return tool_widget


def build_toolbox(tools_dict, max_width="1080px", max_height="600px"):
    """Build the GEE toolbox.

    Args:
        tools_dict (dict): A dictionary containing information for all tools.
        max_width (str, optional): The maximum width of the widget.
        max_height (str, optional): The maximum height of the widget.

    Returns:
        object: An ipywidget representing the toolbox.
""" left_widget = widgets.VBox(layout=widgets.Layout(min_width="175px")) center_widget = widgets.VBox( layout=widgets.Layout(min_width="200px", max_width="200px") ) right_widget = widgets.Output( layout=widgets.Layout(width="630px", max_height=max_height) ) full_widget = widgets.HBox( [left_widget, center_widget, right_widget], layout=widgets.Layout(max_width=max_width, max_height=max_height), ) search_widget = widgets.Text( placeholder="Search tools ...", layout=widgets.Layout(width="170px") ) label_widget = widgets.Label(layout=widgets.Layout(width="170px")) label_widget.value = f"{len(tools_dict)} Available Tools" close_btn = widgets.Button( description="Close Toolbox", icon="close", layout=widgets.Layout(width="170px") ) categories = {} categories["All Tools"] = [] for key in tools_dict.keys(): category = tools_dict[key]["category"] if category not in categories.keys(): categories[category] = [] categories[category].append(tools_dict[key]["name"]) categories["All Tools"].append(tools_dict[key]["name"]) options = list(categories.keys()) all_tools = categories["All Tools"] all_tools.sort() category_widget = widgets.Select( options=options, layout=widgets.Layout(width="170px", height="165px") ) tools_widget = widgets.Select( options=[], layout=widgets.Layout(width="195px", height="400px") ) def category_selected(change): if change["new"]: selected = change["owner"].value options = categories[selected] options.sort() tools_widget.options = options label_widget.value = f"{len(options)} Available Tools" category_widget.observe(category_selected, "value") def tool_selected(change): if change["new"]: selected = change["owner"].value tool_dict = tools_dict[selected] with right_widget: right_widget.clear_output() display(tool_gui(tool_dict, max_height=max_height)) tools_widget.observe(tool_selected, "value") def search_changed(change): if change["new"]: keyword = change["owner"].value if len(keyword) > 0: selected_tools = [] for tool in all_tools: if keyword.lower() in 
tool.lower(): selected_tools.append(tool) if len(selected_tools) > 0: tools_widget.options = selected_tools label_widget.value = f"{len(selected_tools)} Available Tools" else: tools_widget.options = all_tools label_widget.value = f"{len(tools_dict)} Available Tools" search_widget.observe(search_changed, "value") def close_btn_clicked(b): full_widget.close() close_btn.on_click(close_btn_clicked) category_widget.value = list(categories.keys())[0] tools_widget.options = all_tools left_widget.children = [category_widget, search_widget, label_widget, close_btn] center_widget.children = [tools_widget] return full_widget def timelapse_gui(m=None): """Creates timelapse animations. Args: m (geemap.Map, optional): A geemap Map instance. Defaults to None. Returns: ipywidgets: The interactive GUI. """ if m is not None: m.add_basemap("HYBRID") widget_width = "350px" padding = "0px 0px 0px 5px" # upper, right, bottom, left style = {"description_width": "initial"} toolbar_button = widgets.ToggleButton( value=False, tooltip="Toolbar", icon="gear", layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"), ) close_button = widgets.ToggleButton( value=False, tooltip="Close the tool", icon="times", button_style="primary", layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"), ) collection = widgets.Dropdown( options=[ "Landsat TM-ETM-OLI Surface Reflectance", "Sentinel-2AB Surface Reflectance", "MODIS", ], value="Landsat TM-ETM-OLI Surface Reflectance", description="Collection:", layout=widgets.Layout(width=widget_width, padding=padding), style=style, ) title = widgets.Text( value="Timelapse", description="Title:", style=style, layout=widgets.Layout(width="181px", padding=padding), ) bands = widgets.Dropdown( description="RGB:", options=[ "Red/Green/Blue", "NIR/Red/Green", "SWIR2/SWIR1/NIR", "NIR/SWIR1/Red", "SWIR2/NIR/Red", "SWIR2/SWIR1/Red", "SWIR1/NIR/Blue", "NIR/SWIR1/Blue", "SWIR2/NIR/Green", "SWIR1/NIR/Red", ], value="NIR/Red/Green", 
style=style, layout=widgets.Layout(width="165px", padding=padding), ) speed = widgets.IntSlider( description="Frames/sec:", tooltip="Frames per second", value=10, min=1, max=30, readout=False, style=style, layout=widgets.Layout(width="142px", padding=padding), ) speed_label = widgets.Label( layout=widgets.Layout(width="20px", padding=padding), ) widgets.jslink((speed, "value"), (speed_label, "value")) cloud = widgets.Checkbox( value=True, description="Apply fmask (remove clouds, shadows, snow)", tooltip="Apply fmask (remove clouds, shadows, snow)", style=style, ) start_year = widgets.IntSlider( description="Start Year:", value=1984, min=1984, max=2021, readout=False, style=style, layout=widgets.Layout(width="138px", padding=padding), ) start_year_label = widgets.Label() widgets.jslink((start_year, "value"), (start_year_label, "value")) end_year = widgets.IntSlider( description="End Year:", value=2020, min=1984, max=2021, readout=False, style=style, layout=widgets.Layout(width="138px", padding=padding), ) end_year_label = widgets.Label() widgets.jslink((end_year, "value"), (end_year_label, "value")) start_month = widgets.IntSlider( description="Start Month:", value=5, min=1, max=12, readout=False, style=style, layout=widgets.Layout(width="145px", padding=padding), ) start_month_label = widgets.Label( layout=widgets.Layout(width="20px", padding=padding), ) widgets.jslink((start_month, "value"), (start_month_label, "value")) end_month = widgets.IntSlider( description="End Month:", value=10, min=1, max=12, readout=False, style=style, layout=widgets.Layout(width="155px", padding=padding), ) end_month_label = widgets.Label() widgets.jslink((end_month, "value"), (end_month_label, "value")) font_size = widgets.IntSlider( description="Font size:", value=30, min=10, max=50, readout=False, style=style, layout=widgets.Layout(width="152px", padding=padding), ) font_size_label = widgets.Label() widgets.jslink((font_size, "value"), (font_size_label, "value")) font_color = 
widgets.ColorPicker( concise=False, description="Font color:", value="white", style=style, layout=widgets.Layout(width="170px", padding=padding), ) progress_bar_color = widgets.ColorPicker( concise=False, description="Progress bar:", value="blue", style=style, layout=widgets.Layout(width="180px", padding=padding), ) # Normalized Satellite Indices: https://www.usna.edu/Users/oceano/pguth/md_help/html/norm_sat.htm nd_options = [ "Vegetation Index (NDVI)", "Water Index (NDWI)", "Modified Water Index (MNDWI)", "Snow Index (NDSI)", "Soil Index (NDSI)", "Burn Ratio (NBR)", "Customized", ] nd_indices = widgets.Dropdown( options=nd_options, value=None, description="Normalized Difference Index:", style=style, layout=widgets.Layout(width="347px", padding=padding), ) first_band = widgets.Dropdown( description="1st band:", options=["Blue", "Green", "Red", "NIR", "SWIR1", "SWIR2"], value=None, style=style, layout=widgets.Layout(width="171px", padding=padding), ) second_band = widgets.Dropdown( description="2nd band:", options=["Blue", "Green", "Red", "NIR", "SWIR1", "SWIR2"], value=None, style=style, layout=widgets.Layout(width="172px", padding=padding), ) nd_threshold = widgets.FloatSlider( value=0, min=-1, max=1, step=0.01, description="Threshold:", orientation="horizontal", readout=False, style=style, layout=widgets.Layout(width="159px", padding=padding), ) nd_threshold_label = widgets.Label( layout=widgets.Layout(width="35px", padding=padding), ) widgets.jslink((nd_threshold, "value"), (nd_threshold_label, "value")) nd_color = widgets.ColorPicker( concise=False, description="Color:", value="blue", style=style, layout=widgets.Layout(width="145px", padding=padding), ) def nd_index_change(change): if nd_indices.value == "Vegetation Index (NDVI)": first_band.value = "NIR" second_band.value = "Red" elif nd_indices.value == "Water Index (NDWI)": first_band.value = "NIR" second_band.value = "SWIR1" elif nd_indices.value == "Modified Water Index (MNDWI)": first_band.value = "Green" 
second_band.value = "SWIR1" elif nd_indices.value == "Snow Index (NDSI)": first_band.value = "Green" second_band.value = "SWIR1" elif nd_indices.value == "Soil Index (NDSI)": first_band.value = "SWIR1" second_band.value = "NIR" elif nd_indices.value == "Burn Ratio (NBR)": first_band.value = "NIR" second_band.value = "SWIR2" elif nd_indices.value == "Customized": first_band.value = None second_band.value = None nd_indices.observe(nd_index_change, names="value") button_width = "113px" create_gif = widgets.Button( description="Create timelapse", button_style="primary", tooltip="Click to create timelapse", style=style, layout=widgets.Layout(padding="0px", width=button_width), ) def submit_clicked(b): if start_year.value > end_year.value: print("The end year must be great than the start year.") return if start_month.value > end_month.value: print("The end month must be great than the start month.") return if start_year.value == end_year.value: add_progress_bar = False else: add_progress_bar = True start_date = str(start_month.value).zfill(2) + "-01" end_date = str(end_month.value).zfill(2) + "-30" with output: print("Computing... 
Please wait...") nd_bands = None if (first_band.value is not None) and (second_band.value is not None): nd_bands = [first_band.value, second_band.value] temp_output = widgets.Output() if m is not None: out_dir = os.path.expanduser("~/Downloads") if not os.path.exists(out_dir): os.makedirs(out_dir) out_gif = os.path.join(out_dir, "timelapse_" + random_string(3) + ".gif") with temp_output: temp_output.clear_output() m.add_landsat_ts_gif( roi=m.user_roi, label=title.value, start_year=start_year.value, end_year=end_year.value, start_date=start_date, end_date=end_date, bands=bands.value.split("/"), font_color=font_color.value, frames_per_second=speed.value, font_size=font_size.value, add_progress_bar=add_progress_bar, progress_bar_color=progress_bar_color.value, out_gif=out_gif, apply_fmask=cloud.value, nd_bands=nd_bands, nd_threshold=nd_threshold.value, nd_palette=["black", nd_color.value], ) if m.user_roi is not None: m.centerObject(m.user_roi) with output: print("The timelapse has been added to the map.") link = create_download_link( out_gif, title="Click here to download: ", ) display(link) if nd_bands is not None: link_nd = create_download_link( out_gif.replace(".gif", "_nd.gif"), title="Click here to download: ", ) display(link_nd) create_gif.on_click(submit_clicked) reset_btn = widgets.Button( description="Reset", button_style="primary", style=style, layout=widgets.Layout(padding="0px", width=button_width), ) def reset_btn_click(change): output.clear_output() reset_btn.on_click(reset_btn_click) close_btn = widgets.Button( description="Close", button_style="primary", style=style, layout=widgets.Layout(padding="0px", width=button_width), ) def close_click(change): if m is not None: m.toolbar_reset() if m.tool_control is not None and m.tool_control in m.controls: m.remove_control(m.tool_control) m.tool_control = None toolbar_widget.close() close_btn.on_click(close_click) output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding)) 
toolbar_widget = widgets.VBox() toolbar_widget.children = [toolbar_button] toolbar_header = widgets.HBox() toolbar_header.children = [close_button, toolbar_button] toolbar_footer = widgets.VBox() toolbar_footer.children = [ collection, widgets.HBox([title, bands]), widgets.HBox([speed, speed_label, progress_bar_color]), widgets.HBox([start_year, start_year_label, end_year, end_year_label]), widgets.HBox([start_month, start_month_label, end_month, end_month_label]), widgets.HBox([font_size, font_size_label, font_color]), cloud, nd_indices, widgets.HBox([first_band, second_band]), widgets.HBox([nd_threshold, nd_threshold_label, nd_color]), widgets.HBox([create_gif, reset_btn, close_btn]), output, ] toolbar_event = ipyevents.Event( source=toolbar_widget, watched_events=["mouseenter", "mouseleave"] ) def handle_toolbar_event(event): if event["type"] == "mouseenter": toolbar_widget.children = [toolbar_header, toolbar_footer] elif event["type"] == "mouseleave": if not toolbar_button.value: toolbar_widget.children = [toolbar_button] toolbar_button.value = False close_button.value = False toolbar_event.on_dom_event(handle_toolbar_event) def toolbar_btn_click(change): if change["new"]: close_button.value = False toolbar_widget.children = [toolbar_header, toolbar_footer] else: if not close_button.value: toolbar_widget.children = [toolbar_button] toolbar_button.observe(toolbar_btn_click, "value") def close_btn_click(change): if change["new"]: toolbar_button.value = False if m is not None: if m.tool_control is not None and m.tool_control in m.controls: m.remove_control(m.tool_control) m.tool_control = None m.toolbar_reset() toolbar_widget.close() close_button.observe(close_btn_click, "value") toolbar_button.value = True if m is not None: toolbar_control = ipyleaflet.WidgetControl( widget=toolbar_widget, position="topright" ) if toolbar_control not in m.controls: m.add_control(toolbar_control) m.tool_control = toolbar_control else: return toolbar_widget def time_slider(m=None): 
"""Creates a time slider for visualizing any ee.ImageCollection. Args: m (geemap.Map, optional): A geemap Map instance. Defaults to None. Returns: ipywidgets: The interactive GUI. """ import matplotlib as mpl import matplotlib.pyplot as plt widget_width = "350px" padding = "0px 0px 0px 5px" # upper, right, bottom, left style = {"description_width": "initial"} toolbar_button = widgets.ToggleButton( value=False, tooltip="Toolbar", icon="fast-forward", layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"), ) close_button = widgets.ToggleButton( value=False, tooltip="Close the tool", icon="times", button_style="primary", layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"), ) col_options_dict = { "Landsat TM-ETM-OLI Surface Reflectance": { "min": 0, "max": 4000, "bands": ["NIR", "Red", "Green"], "start_year": 1984, "end_year": 2021, "bandnames": ["Blue", "Green", "Red", "NIR", "SWIR1", "SWIR2", "pixel_qa"], }, "MOD13A2.006 Terra Vegetation Indices": { "min": 0, "max": 9000, "start_year": 2000, "end_year": 2021, "palette": [ "FFFFFF", "CE7E45", "DF923D", "F1B555", "FCD163", "99B718", "74A901", "66A000", "529400", "3E8601", "207401", "056201", "004C00", "023B01", "012E01", "011D01", "011301", ], }, "Sentinel-2 Surface Relectance": { "min": 0, "max": 4000, "bands": ["NIR", "Red", "Green"], "start_year": 2015, "end_year": 2021, "bandnames": [ "Blue", "Green", "Red", "Red Edge 1", "Red Edge 2", "Red Edge 3", "NIR", "Red Edge 4", "SWIR1", "SWIR2", "QA60", ], }, "USDA NAIP Imagery": { "min": 0, "max": 255, "bands": ["R", "G", "B"], "start_year": 2003, "end_year": 2021, "bandnames": ["R", "G", "B", "N"], }, } col_options = list(col_options_dict.keys()) if m is not None: col_options += m.ee_raster_layer_names collection = widgets.Dropdown( options=col_options, value=col_options[0], description="Time series:", layout=widgets.Layout(width=widget_width, padding=padding), style=style, ) region = widgets.Dropdown( options=["User-drawn 
ROI"] + m.ee_vector_layer_names, value="User-drawn ROI", description="Region:", layout=widgets.Layout(width=widget_width, padding=padding), style=style, ) dropdown_width = "97px" landsat_bands = ["Blue", "Green", "Red", "NIR", "SWIR1", "SWIR2", "pixel_qa"] band1_dropdown = widgets.Dropdown( options=landsat_bands, value="NIR", layout=widgets.Layout(width=dropdown_width), ) band2_dropdown = widgets.Dropdown( options=landsat_bands, value="Red", layout=widgets.Layout(width=dropdown_width), ) band3_dropdown = widgets.Dropdown( options=landsat_bands, value="Green", layout=widgets.Layout(width=dropdown_width), ) bands_label = widgets.Label("Bands:", layout=widgets.Layout(padding=padding)) bands_hbox = widgets.HBox( [bands_label, band1_dropdown, band2_dropdown, band3_dropdown] ) vis = widgets.Text( value="", description="Vis min value:", placeholder="{'min': 0, 'max': 1, 'palette': ['red', 'blue']}", style=style, layout=widgets.Layout(width=widget_width, padding=padding), ) vis_min = widgets.Text( value="0", description="Vis min value:", style=style, layout=widgets.Layout(width="172px", padding=padding), ) vis_max = widgets.Text( value="4000", description="Vis max value:", style=style, layout=widgets.Layout(width="172px", padding=padding), ) opacity = widgets.FloatSlider( value=1, min=0, max=1, step=0.01, description="Opacity:", continuous_update=True, readout=False, readout_format=".2f", layout=widgets.Layout(width="130px", padding=padding), style={"description_width": "50px"}, ) opacity_label = widgets.Label(layout=widgets.Layout(width="40px", padding=padding)) widgets.jslink((opacity, "value"), (opacity_label, "value")) gamma = widgets.FloatSlider( value=1, min=0.1, max=10, step=0.01, description="Gamma:", continuous_update=True, readout=False, readout_format=".2f", layout=widgets.Layout(width="123px", padding=padding), style={"description_width": "50px"}, ) gamma_label = widgets.Label(layout=widgets.Layout(width="40px", padding=padding)) widgets.jslink((gamma, 
"value"), (gamma_label, "value")) color_picker = widgets.ColorPicker( concise=False, value="#000000", layout=widgets.Layout(width="97px"), style={"description_width": "initial"}, ) add_color = widgets.Button( icon="plus", tooltip="Add a hex color string to the palette", layout=widgets.Layout(width="32px"), ) del_color = widgets.Button( icon="minus", tooltip="Remove a hex color string from the palette", layout=widgets.Layout(width="32px"), ) reset_color = widgets.Button( icon="eraser", tooltip="Remove all color strings from the palette", layout=widgets.Layout(width="34px"), ) classes = widgets.Dropdown( options=["Any"] + [str(i) for i in range(3, 13)], description="Classes:", layout=widgets.Layout(width="150px", padding=padding), style={"description_width": "initial"}, ) colormap = widgets.Dropdown( options=plt.colormaps(), value=None, description="Colormap:", layout=widgets.Layout(width="195px", padding=padding), style={"description_width": "initial"}, ) def classes_changed(change): if change["new"]: selected = change["owner"].value if colormap.value is not None: n_class = None if selected != "Any": n_class = int(classes.value) colors = plt.cm.get_cmap(colormap.value, n_class) cmap_colors = [ mpl.colors.rgb2hex(colors(i))[1:] for i in range(colors.N) ] _, ax = plt.subplots(figsize=(6, 0.4)) cmap = mpl.colors.LinearSegmentedColormap.from_list( "custom", to_hex_colors(cmap_colors), N=256 ) vmin = 0 vmax = 1 try: if vis_min.value != "": vmin = float(vis_min.value) if vis_max.value != "": vmax = float(vis_max.value) except Exception as _: pass norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax) mpl.colorbar.ColorbarBase( ax, norm=norm, cmap=cmap, orientation="horizontal" ) palette.value = ", ".join([color for color in cmap_colors]) if m.colorbar_widget is None: m.colorbar_widget = widgets.Output( layout=widgets.Layout(height="60px") ) if m.colorbar_ctrl is None: m.colorbar_ctrl = ipyleaflet.WidgetControl( widget=m.colorbar_widget, position="bottomright" ) 
m.add_control(m.colorbar_ctrl) colorbar_output = m.colorbar_widget with colorbar_output: colorbar_output.clear_output() plt.show() classes.observe(classes_changed, "value") palette = widgets.Text( value="", placeholder="", description="Palette:", tooltip="Enter a list of hex color code (RRGGBB)", layout=widgets.Layout(width="137px", padding=padding), style={"description_width": "initial"}, ) def add_color_clicked(b): if color_picker.value is not None: if len(palette.value) == 0: palette.value = color_picker.value[1:] else: palette.value += ", " + color_picker.value[1:] def del_color_clicked(b): if "," in palette.value: items = [item.strip() for item in palette.value.split(",")] palette.value = ", ".join(items[:-1]) else: palette.value = "" def reset_color_clicked(b): palette.value = "" add_color.on_click(add_color_clicked) del_color.on_click(del_color_clicked) reset_color.on_click(reset_color_clicked) def colormap_changed(change): if change["new"]: n_class = None if classes.value != "Any": n_class = int(classes.value) colors = plt.cm.get_cmap(colormap.value, n_class) cmap_colors = [mpl.colors.rgb2hex(colors(i))[1:] for i in range(colors.N)] _, ax = plt.subplots(figsize=(6, 0.4)) cmap = mpl.colors.LinearSegmentedColormap.from_list( "custom", to_hex_colors(cmap_colors), N=256 ) vmin = 0 vmax = 1 try: if vis_min.value != "": vmin = float(vis_min.value) if vis_max.value != "": vmax = float(vis_max.value) except Exception as _: pass norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax) mpl.colorbar.ColorbarBase( ax, norm=norm, cmap=cmap, orientation="horizontal" ) palette.value = ", ".join(cmap_colors) if m.colorbar_widget is None: m.colorbar_widget = widgets.Output(layout=widgets.Layout(height="60px")) if m.colorbar_ctrl is None: m.colorbar_ctrl = ipyleaflet.WidgetControl( widget=m.colorbar_widget, position="bottomright" ) m.add_control(m.colorbar_ctrl) colorbar_output = m.colorbar_widget with colorbar_output: colorbar_output.clear_output() plt.show() 
colormap.observe(colormap_changed, "value") palette_vbox = widgets.VBox() labels = widgets.Text( value=", ".join([str(i) for i in range(1984, 2021)]), description="Labels:", style=style, layout=widgets.Layout(width="150px", padding=padding), ) speed = widgets.FloatSlider( description="Speed (sec):", tooltip="Time interval in seconds", value=1, min=0.1, max=10, readout=False, style=style, layout=widgets.Layout(width="160px", padding=padding), ) speed_label = widgets.Label( layout=widgets.Layout(width="25px", padding=padding), ) widgets.jslink((speed, "value"), (speed_label, "value")) prebuilt_options = widgets.VBox() cloud = widgets.Checkbox( value=True, description="Apply fmask (remove clouds, shadows, snow)", tooltip="Apply fmask (remove clouds, shadows, snow)", style=style, ) start_year = widgets.IntSlider( description="Start Year:", value=1984, min=1984, max=2021, readout=False, style=style, layout=widgets.Layout(width="138px", padding=padding), ) def year_change(change): if change["new"]: if collection.value != "MOD13A2.006 Terra Vegetation Indices": labels.value = ", ".join( str(i) for i in range(int(start_year.value), int(end_year.value) + 1) ) else: modis_labels = [] for i in range(int(start_year.value), int(end_year.value) + 1): for j in range(1, 13): modis_labels.append(str(i) + "-" + str(j).zfill(2)) labels.value = ", ".join(modis_labels) start_year.observe(year_change, "value") start_year_label = widgets.Label() widgets.jslink((start_year, "value"), (start_year_label, "value")) end_year = widgets.IntSlider( description="End Year:", value=2020, min=1984, max=2021, readout=False, style=style, layout=widgets.Layout(width="138px", padding=padding), ) end_year.observe(year_change, "value") end_year_label = widgets.Label() widgets.jslink((end_year, "value"), (end_year_label, "value")) start_month = widgets.IntSlider( description="Start Month:", value=1, min=1, max=12, readout=False, style=style, layout=widgets.Layout(width="145px", padding=padding), ) 
start_month_label = widgets.Label( layout=widgets.Layout(width="20px", padding=padding), ) widgets.jslink((start_month, "value"), (start_month_label, "value")) end_month = widgets.IntSlider( description="End Month:", value=12, min=1, max=12, readout=False, style=style, layout=widgets.Layout(width="155px", padding=padding), ) end_month_label = widgets.Label() widgets.jslink((end_month, "value"), (end_month_label, "value")) prebuilt_options.children = [ widgets.HBox([start_year, start_year_label, end_year, end_year_label]), widgets.HBox([start_month, start_month_label, end_month, end_month_label]), cloud, ] button_width = "113px" apply_btn = widgets.Button( description="Apply", button_style="primary", tooltip="Apply the settings to activate the time slider", style=style, layout=widgets.Layout(padding="0px", width=button_width), ) def submit_clicked(b): output.clear_output() with output: if start_year.value > end_year.value: print("The end year must be great than the start year.") return if start_month.value > end_month.value: print("The end month must be great than the start month.") return if m is not None: roi = None if region.value == "User-drawn ROI" and (m.user_roi is not None): roi = m.user_roi elif region.value == "User-drawn ROI" and (m.user_roi is None): with output: print("Use the Drawing tool to create an ROI.") return elif region.value in m.ee_layer_dict: roi = m.ee_layer_dict[region.value]["ee_object"] with output: print("Computing... 
Please wait...") layer_labels = None vis_params = {} try: if vis_min.value != "": vis_params["min"] = float(vis_min.value) if vis_max.value != "": vis_params["max"] = float(vis_max.value) vis_params["opacity"] = float(opacity.value) if len(bands_hbox.children) > 0 and ( band1_dropdown.value and band2_dropdown.value and band3_dropdown.value ): vis_params["bands"] = [ band1_dropdown.value, band2_dropdown.value, band3_dropdown.value, ] vis_params["gamma"] = float(gamma.value) if len(palette_vbox.children) > 0: if "," in palette.value: vis_params["palette"] = [ i.strip() for i in palette.value.split(",") ] elif len(palette.value) > 0: vis_params["palette"] = palette.value.strip() except Exception as _: with output: print("The vis parmas are invalid.") return if labels.value != "" and "," in labels.value: try: layer_labels = [i.strip() for i in labels.value.split(",")] except Exception as e: raise ValueError(e) if collection.value in m.ee_raster_layer_names: layer = m.ee_layer_dict[collection.value] ee_object = layer["ee_object"] elif collection.value in col_options_dict: start_date = str(start_month.value).zfill(2) + "-01" end_date = str(end_month.value).zfill(2) + "-30" if collection.value == "Landsat TM-ETM-OLI Surface Reflectance": ee_object = landsat_timeseries( roi, int(start_year.value), int(end_year.value), start_date, end_date, cloud.value, ) elif collection.value == "MOD13A2.006 Terra Vegetation Indices": ee_object = modis_timeseries( roi=roi, start_year=int(start_year.value), end_year=int(end_year.value), start_date=start_date, end_date=end_date, ) elif collection.value == "Sentinel-2 Surface Relectance": ee_object = sentinel2_timeseries( roi, int(start_year.value), int(end_year.value), start_date, end_date, cloud.value, ) elif collection.value == "USDA NAIP Imagery": if int(start_year.value) < 2009 and ( band1_dropdown.value == "N" or band2_dropdown.value == "N" or band3_dropdown.value == "N" ): with output: output.clear_output() print("4-band NAIP imagery 
not available before 2009.") return ee_object = naip_timeseries(roi, start_year.value, end_year.value) m.add_time_slider( ee_object, region=roi, vis_params=vis_params, labels=layer_labels, time_interval=speed.value, ) output.clear_output() if m.colorbar_ctrl is not None: m.remove_control(m.colorbar_ctrl) m.colorbar_ctrl = None apply_btn.on_click(submit_clicked) reset_btn = widgets.Button( description="Reset", button_style="primary", style=style, layout=widgets.Layout(padding="0px", width=button_width), ) def reset_btn_click(change): output.clear_output() collection.value = col_options[0] region.value = "User-drawn ROI" vis.value = "" labels.value = "1, 2, 3" speed.value = 1 if m.colorbar_ctrl is not None: m.remove_control(m.colorbar_ctrl) m.colorbar_ctrl = None reset_btn.on_click(reset_btn_click) close_btn = widgets.Button( description="Close", button_style="primary", style=style, layout=widgets.Layout(padding="0px", width=button_width), ) def close_click(change): if m is not None: m.toolbar_reset() if m.tool_control is not None and m.tool_control in m.controls: m.remove_control(m.tool_control) m.tool_control = None if m.colorbar_ctrl is not None: m.remove_control(m.colorbar_ctrl) m.colorbar_ctrl = None toolbar_widget.close() close_btn.on_click(close_click) def collection_changed(change): if change["new"]: selected = change["owner"].value if selected in m.ee_layer_dict: prebuilt_options.children = [] labels.value = "" region.value = None ee_object = m.ee_layer_dict[selected]["ee_object"] vis_params = m.ee_layer_dict[selected]["vis_params"] if isinstance(ee_object, ee.Image): palette_vbox.children = [ widgets.HBox([classes, colormap]), widgets.HBox( [palette, color_picker, add_color, del_color, reset_color] ), ] bands_hbox.children = [] elif isinstance(ee_object, ee.ImageCollection): first = ee.Image(ee_object.first()) band_names = first.bandNames().getInfo() band_count = len(band_names) if band_count > 2: band1_dropdown.options = band_names band2_dropdown.options = 
band_names band3_dropdown.options = band_names band1_dropdown.value = band_names[2] band2_dropdown.value = band_names[1] band3_dropdown.value = band_names[0] palette_vbox.children = [] bands_hbox.children = [ bands_label, band1_dropdown, band2_dropdown, band3_dropdown, ] else: palette_vbox.children = [ widgets.HBox([classes, colormap]), widgets.HBox( [ palette, color_picker, add_color, del_color, reset_color, ] ), ] bands_hbox.children = [] if "min" in vis_params: vis_min.value = str(vis_params["min"]) if "max" in vis_params: vis_max.value = str(vis_params["max"]) if "opacity" in vis_params: opacity.value = str(vis_params["opacity"]) if "gamma" in vis_params: if isinstance(vis_params["gamma"], list): gamma.value = str(vis_params["gamma"][0]) else: gamma.value = str(vis_params["gamma"]) if "palette" in vis_params: palette.value = ", ".join(vis_params["palette"]) else: prebuilt_options.children = [ widgets.HBox( [start_year, start_year_label, end_year, end_year_label] ), widgets.HBox( [start_month, start_month_label, end_month, end_month_label] ), cloud, ] if selected == "MOD13A2.006 Terra Vegetation Indices": palette_vbox.children = [ widgets.HBox([classes, colormap]), widgets.HBox( [ palette, color_picker, add_color, del_color, reset_color, ] ), ] bands_hbox.children = [] palette.value = ", ".join(col_options_dict[selected]["palette"]) modis_labels = [] for i in range(int(start_year.value), int(end_year.value) + 1): for j in range(1, 13): modis_labels.append(str(i) + "-" + str(j).zfill(2)) labels.value = ", ".join(modis_labels) else: bands_hbox.children = [ bands_label, band1_dropdown, band2_dropdown, band3_dropdown, ] bandnames = col_options_dict[selected]["bandnames"] band1_dropdown.options = bandnames band2_dropdown.options = bandnames band3_dropdown.options = bandnames if ( selected == "Landsat TM-ETM-OLI Surface Reflectance" or selected == "Sentinel-2 Surface Relectance" ): band1_dropdown.value = bandnames[2] band2_dropdown.value = bandnames[1] 
band3_dropdown.value = bandnames[0] palette_vbox.children = [] elif selected == "USDA NAIP Imagery": band1_dropdown.value = bandnames[0] band2_dropdown.value = bandnames[1] band3_dropdown.value = bandnames[2] palette_vbox.children = [] labels.value = ", ".join( str(i) for i in range(int(start_year.value), int(end_year.value) + 1) ) start_year.min = col_options_dict[selected]["start_year"] start_year.max = col_options_dict[selected]["end_year"] start_year.value = start_year.min end_year.min = col_options_dict[selected]["start_year"] end_year.max = col_options_dict[selected]["end_year"] end_year.value = end_year.max vis_min.value = str(col_options_dict[selected]["min"]) vis_max.value = str(col_options_dict[selected]["max"]) if selected == "MOD13A2.006 Terra Vegetation Indices": start_year.value = "2001" end_year.value = "2020" elif selected == "USDA NAIP Imagery": start_year.value = "2009" end_year.value = "2019" collection.observe(collection_changed, "value") output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding)) toolbar_widget = widgets.VBox() toolbar_widget.children = [toolbar_button] toolbar_header = widgets.HBox() toolbar_header.children = [close_button, toolbar_button] toolbar_footer = widgets.VBox() toolbar_footer.children = [ collection, region, bands_hbox, widgets.HBox([vis_min, vis_max]), widgets.HBox([opacity, opacity_label, gamma, gamma_label]), palette_vbox, widgets.HBox([labels, speed, speed_label]), prebuilt_options, widgets.HBox([apply_btn, reset_btn, close_btn]), output, ] toolbar_event = ipyevents.Event( source=toolbar_widget, watched_events=["mouseenter", "mouseleave"] ) def handle_toolbar_event(event): if event["type"] == "mouseenter": toolbar_widget.children = [toolbar_header, toolbar_footer] elif event["type"] == "mouseleave": if not toolbar_button.value: toolbar_widget.children = [toolbar_button] toolbar_button.value = False close_button.value = False toolbar_event.on_dom_event(handle_toolbar_event) def 
toolbar_btn_click(change): if change["new"]: close_button.value = False toolbar_widget.children = [toolbar_header, toolbar_footer] else: if not close_button.value: toolbar_widget.children = [toolbar_button] toolbar_button.observe(toolbar_btn_click, "value") def close_btn_click(change): if change["new"]: toolbar_button.value = False if m is not None: if m.tool_control is not None and m.tool_control in m.controls: m.remove_control(m.tool_control) m.tool_control = None m.toolbar_reset() toolbar_widget.close() if m.colorbar_ctrl is not None: m.remove_control(m.colorbar_ctrl) m.colorbar_ctrl = None close_button.observe(close_btn_click, "value") toolbar_button.value = True if m is not None: toolbar_control = ipyleaflet.WidgetControl( widget=toolbar_widget, position="topright" ) if toolbar_control not in m.controls: m.add_control(toolbar_control) m.tool_control = toolbar_control else: return toolbar_widget def plot_transect(m=None): from bqplot import pyplot as plt widget_width = "250px" padding = "0px 0px 0px 5px" # upper, right, bottom, left toolbar_button = widgets.ToggleButton( value=False, tooltip="Show or hide the toolbar", icon="line-chart", layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"), ) close_button = widgets.ToggleButton( value=False, tooltip="Close the tool", icon="times", button_style="primary", layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"), ) layer = widgets.Dropdown( options=["Option 1", "Option 2", "Option 3"], value=None, description="Image:", layout=widgets.Layout(width=widget_width, padding=padding), style={"description_width": "initial"}, ) band = widgets.Dropdown( options=["Option 1", "Option 2", "Option 3"], value=None, description="Band:", layout=widgets.Layout(width=widget_width, padding=padding), style={"description_width": "initial"}, ) reducer = widgets.Dropdown( options=["mean", "median", "min", "max", "mode", "sum", "stdDev", "variance"], value="mean", description="Stats:", 
layout=widgets.Layout(width="120px", padding=padding), style={"description_width": "initial"}, ) segments = widgets.IntText( value="100", description="Segments:", placeholder="Number of segments", style={"description_width": "initial"}, layout=widgets.Layout(width="126px", padding=padding), ) dist_interval = widgets.Text( value="", description="Distance interval (m):", placeholder="Optional", style={"description_width": "initial"}, layout=widgets.Layout(width=widget_width, padding=padding), ) title = widgets.Text( value="", description="Plot title:", placeholder="Plot title", style={"description_width": "initial"}, layout=widgets.Layout(width=widget_width, padding=padding), ) xlabel = widgets.Text( value="", description="xlabel:", placeholder="x-axis", style={"description_width": "initial"}, layout=widgets.Layout(width="123px", padding=padding), ) ylabel = widgets.Text( value="", description="ylabel:", placeholder="y-axis", style={"description_width": "initial"}, layout=widgets.Layout(width="123px", padding=padding), ) buttons = widgets.ToggleButtons( value=None, options=["Plot", "Reset", "Close"], tooltips=["Plot transect", "Reset", "Close"], button_style="primary", ) buttons.style.button_width = "80px" output = widgets.Output( layout=widgets.Layout(max_width="500px", max_height="265px", padding=padding) ) toolbar_widget = widgets.VBox() toolbar_widget.children = [toolbar_button] toolbar_header = widgets.HBox() toolbar_header.children = [close_button, toolbar_button] toolbar_footer = widgets.VBox() toolbar_footer.children = [ layer, band, widgets.HBox([reducer, segments]), dist_interval, title, widgets.HBox([xlabel, ylabel]), buttons, ] toolbar_event = ipyevents.Event( source=toolbar_widget, watched_events=["mouseenter", "mouseleave"] ) if m is not None: layer.options = m.ee_raster_layer_names if len(layer.options) > 0: image = m.ee_layer_dict[layer.value]["ee_object"] if isinstance(image, ee.ImageCollection): image = image.toBands() band.options = 
image.bandNames().getInfo() transect_control = ipyleaflet.WidgetControl( widget=output, position="bottomright" ) m.add_control(transect_control) m.transect_control = transect_control def layer_changed(change): if change["new"]: if m is not None: image = m.ee_layer_dict[layer.value]["ee_object"] if isinstance(image, ee.ImageCollection): image = image.toBands() band.options = image.bandNames().getInfo() layer.observe(layer_changed, "value") def handle_toolbar_event(event): if event["type"] == "mouseenter": toolbar_widget.children = [toolbar_header, toolbar_footer] elif event["type"] == "mouseleave": if not toolbar_button.value: toolbar_widget.children = [toolbar_button] toolbar_button.value = False close_button.value = False toolbar_event.on_dom_event(handle_toolbar_event) def toolbar_btn_click(change): if change["new"]: close_button.value = False toolbar_widget.children = [toolbar_header, toolbar_footer] else: if not close_button.value: toolbar_widget.children = [toolbar_button] toolbar_button.observe(toolbar_btn_click, "value") def close_btn_click(change): if change["new"]: toolbar_button.value = False if m is not None: m.toolbar_reset() if m.tool_control is not None and m.tool_control in m.controls: m.remove_control(m.tool_control) m.tool_control = None if m.transect_control is not None and m.transect_control in m.controls: m.remove_control(m.transect_control) m.transect_control = None toolbar_widget.close() close_button.observe(close_btn_click, "value") def button_clicked(change): if change["new"] == "Plot": with output: output.clear_output() if m is not None: if m.user_roi is not None: line = m.user_roi geom_type = line.type().getInfo() if geom_type != "LineString": print("Use drawing tool to draw a line") else: image = m.ee_layer_dict[layer.value]["ee_object"] if isinstance(image, ee.ImageCollection): image = image.toBands() image = image.select([band.value]) if dist_interval.value == "": dist = None else: dist = float(dist_interval.value) print("Computing 
...") df = extract_transect( image, line, reducer.value, int(segments.value), dist, to_pandas=True, ) output.clear_output() fig = plt.figure(title=title.value) fig.layout.width = output.layout.max_width fig.layout.height = output.layout.max_height plt.plot(df["distance"], df[reducer.value]) plt.xlabel(xlabel.value) plt.ylabel(ylabel.value) plt.show() else: print("Use drawing tool to draw a line") elif change["new"] == "Reset": output.clear_output() elif change["new"] == "Close": if m is not None: m.toolbar_reset() if m.tool_control is not None and m.tool_control in m.controls: m.remove_control(m.tool_control) m.tool_control = None if m.transect_control is not None and m.transect_control in m.controls: m.remove_control(m.transect_control) m.transect_control = None toolbar_widget.close() buttons.value = None buttons.observe(button_clicked, "value") toolbar_button.value = True if m is not None: toolbar_control = ipyleaflet.WidgetControl( widget=toolbar_widget, position="topright" ) if toolbar_control not in m.controls: m.add_control(toolbar_control) m.tool_control = toolbar_control else: return toolbar_widget def sankee_gui(m=None): import sankee widget_width = "250px" padding = "0px 0px 0px 5px" # upper, right, bottom, left toolbar_button = widgets.ToggleButton( value=False, tooltip="Toolbar", icon="random", layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"), ) close_button = widgets.ToggleButton( value=False, tooltip="Close the tool", icon="times", button_style="primary", layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"), ) region = widgets.Dropdown( options=["User-drawn ROI"], value="User-drawn ROI", description="Region:", layout=widgets.Layout(width=widget_width, padding=padding), style={"description_width": "initial"}, ) def region_changed(change): if change["new"] == "Las Vegas": if m is not None: las_vegas = ee.Geometry.Polygon( [ [ [-115.01184401606046, 36.24170785506492], [-114.98849806879484, 
36.29928186470082], [-115.25628981684171, 36.35238941394592], [-115.34692702387296, 36.310348922031565], [-115.37988600824796, 36.160811202271944], [-115.30298171137296, 36.03653336474891], [-115.25628981684171, 36.05207884201088], [-115.26590285395109, 36.226199908103695], [-115.19174513910734, 36.25499793268206], ] ] ) m.addLayer(las_vegas, {}, "Las Vegas") m.centerObject(las_vegas, 10) region.observe(region_changed, "value") dataset = widgets.Dropdown( options=[ "NLCD - National Land Cover Database", "MCD12Q1 - MODIS Global Land Cover", "CGLS - Copernicus Global Land Cover", "LCMS - Land Change Monitoring System", ], value="NLCD - National Land Cover Database", description="Dataset:", layout=widgets.Layout(width=widget_width, padding=padding), style={"description_width": "initial"}, ) NLCD_options = ["2001", "2004", "2006", "2008", "2011", "2013", "2016"] MODIS_options = [str(y) for y in range(2001, 2020)] CGLS_options = [str(y) for y in range(2015, 2020)] LCMS_options = [str(y) for y in range(1985, 2021)] before = widgets.Dropdown( options=NLCD_options, value="2001", description="Before:", layout=widgets.Layout(width="123px", padding=padding), style={"description_width": "initial"}, ) after = widgets.Dropdown( options=NLCD_options, value="2016", description="After:", layout=widgets.Layout(width="123px", padding=padding), style={"description_width": "initial"}, ) def dataset_changed(change): if change["new"] == "NLCD - National Land Cover Database": before.options = NLCD_options after.options = NLCD_options before.value = NLCD_options[0] after.value = NLCD_options[-1] elif change["new"] == "MCD12Q1 - MODIS Global Land Cover": before.options = MODIS_options after.options = MODIS_options before.value = MODIS_options[0] after.value = MODIS_options[-1] elif change["new"] == "CGLS - Copernicus Global Land Cover": before.options = CGLS_options after.options = CGLS_options before.value = CGLS_options[0] after.value = CGLS_options[-1] elif change["new"] == "LCMS - Land 
Change Monitoring System": before.options = LCMS_options after.options = LCMS_options before.value = LCMS_options[0] after.value = LCMS_options[-1] dataset.observe(dataset_changed, "value") dataset_template = { "NLCD - National Land Cover Database": sankee.datasets.NLCD2016, "MCD12Q1 - MODIS Global Land Cover": sankee.datasets.MODIS_LC_TYPE1, "CGLS - Copernicus Global Land Cover": sankee.datasets.CGLS_LC100, "LCMS - Land Change Monitoring System": sankee.datasets.LCMS_LC, } band_name = { "NLCD - National Land Cover Database": "landcover", "MCD12Q1 - MODIS Global Land Cover": "LC_Type1", "CGLS - Copernicus Global Land Cover": "discrete_classification", "LCMS - Land Change Monitoring System": "Land_Cover", } samples = widgets.IntText( value=1000, description="Samples:", placeholder="The number of samples points to randomly generate for characterizing all images", style={"description_width": "initial"}, layout=widgets.Layout(width="133px", padding=padding), ) classes = widgets.IntText( value=6, description="Classes:", style={"description_width": "initial"}, layout=widgets.Layout(width="113px", padding=padding), ) title = widgets.Text( value="Land Cover Change", description="Title:", style={"description_width": "initial"}, layout=widgets.Layout(width=widget_width, padding=padding), ) buttons = widgets.ToggleButtons( value=None, options=["Apply", "Reset", "Close"], tooltips=["Apply", "Reset", "Close"], button_style="primary", ) buttons.style.button_width = "80px" output = widgets.Output(layout=widgets.Layout(padding=padding)) toolbar_widget = widgets.VBox() toolbar_widget.children = [toolbar_button] toolbar_header = widgets.HBox() toolbar_header.children = [close_button, toolbar_button] toolbar_footer = widgets.VBox() toolbar_footer.children = [ region, dataset, widgets.HBox([before, after]), widgets.HBox([samples, classes]), title, buttons, output, ] toolbar_event = ipyevents.Event( source=toolbar_widget, watched_events=["mouseenter", "mouseleave"] ) if m is not None: 
if "Las Vegas" not in m.ee_vector_layer_names: region.options = ["User-drawn ROI", "Las Vegas"] + m.ee_vector_layer_names else: region.options = ["User-drawn ROI"] + m.ee_vector_layer_names plot_close_btn = widgets.Button( tooltip="Close the plot", icon="times", layout=widgets.Layout( height="28px", width="28px", padding="0px 0px 0px 0px" ), ) def plot_close_btn_clicked(b): plot_widget.children = [] plot_close_btn.on_click(plot_close_btn_clicked) plot_reset_btn = widgets.Button( tooltip="Reset the plot", icon="home", layout=widgets.Layout( height="28px", width="28px", padding="0px 0px 0px 0px" ), ) def plot_reset_btn_clicked(b): m.sankee_plot.update_layout( width=600, height=250, margin=dict(l=10, r=10, b=10, t=50, pad=5), ) with plot_output: plot_output.clear_output() display(m.sankee_plot) plot_reset_btn.on_click(plot_reset_btn_clicked) plot_fullscreen_btn = widgets.Button( tooltip="Fullscreen the plot", icon="arrows-alt", layout=widgets.Layout( height="28px", width="28px", padding="0px 0px 0px 0px" ), ) def plot_fullscreen_btn_clicked(b): m.sankee_plot.update_layout( width=1030, height=int(m.layout.height[:-2]) - 60, margin=dict(l=10, r=10, b=10, t=50, pad=5), ) with plot_output: plot_output.clear_output() display(m.sankee_plot) plot_fullscreen_btn.on_click(plot_fullscreen_btn_clicked) width_btn = widgets.Button( tooltip="Change plot width", icon="arrows-h", layout=widgets.Layout( height="28px", width="28px", padding="0px 0px 0px 0px" ), ) def width_btn_clicked(b): m.sankee_plot.update_layout( width=1030, margin=dict(l=10, r=10, b=10, t=50, pad=5), ) with plot_output: plot_output.clear_output() display(m.sankee_plot) width_btn.on_click(width_btn_clicked) height_btn = widgets.Button( tooltip="Change plot height", icon="arrows-v", layout=widgets.Layout( height="28px", width="28px", padding="0px 0px 0px 0px" ), ) def height_btn_clicked(b): m.sankee_plot.update_layout( height=int(m.layout.height[:-2]) - 60, margin=dict(l=10, r=10, b=10, t=50, pad=5), ) with 
plot_output: plot_output.clear_output() display(m.sankee_plot) height_btn.on_click(height_btn_clicked) width_slider = widgets.IntSlider( value=600, min=400, max=1030, step=10, description="", readout=False, continuous_update=False, layout=widgets.Layout(width="100px", padding=padding), style={"description_width": "initial"}, ) width_slider_label = widgets.Label( layout=widgets.Layout(padding="0px 10px 0px 0px") ) widgets.jslink((width_slider, "value"), (width_slider_label, "value")) def width_changed(change): if change["new"]: m.sankee_plot.update_layout( width=width_slider.value, margin=dict(l=10, r=10, b=10, t=50, pad=5), ) with plot_output: plot_output.clear_output() display(m.sankee_plot) width_slider.observe(width_changed, "value") height_slider = widgets.IntSlider( value=250, min=200, max=int(m.layout.height[:-2]) - 60, step=10, description="", readout=False, continuous_update=False, layout=widgets.Layout(width="100px", padding=padding), style={"description_width": "initial"}, ) height_slider_label = widgets.Label() widgets.jslink((height_slider, "value"), (height_slider_label, "value")) def height_changed(change): if change["new"]: m.sankee_plot.update_layout( height=height_slider.value, margin=dict(l=10, r=10, b=10, t=50, pad=5), ) with plot_output: plot_output.clear_output() display(m.sankee_plot) height_slider.observe(height_changed, "value") plot_output = widgets.Output() plot_widget = widgets.VBox([plot_output]) sankee_control = ipyleaflet.WidgetControl( widget=plot_widget, position="bottomright" ) m.add_control(sankee_control) m.sankee_control = sankee_control def handle_toolbar_event(event): if event["type"] == "mouseenter": toolbar_widget.children = [toolbar_header, toolbar_footer] elif event["type"] == "mouseleave": if not toolbar_button.value: toolbar_widget.children = [toolbar_button] toolbar_button.value = False close_button.value = False toolbar_event.on_dom_event(handle_toolbar_event) def toolbar_btn_click(change): if change["new"]: 
close_button.value = False toolbar_widget.children = [toolbar_header, toolbar_footer] else: if not close_button.value: toolbar_widget.children = [toolbar_button] toolbar_button.observe(toolbar_btn_click, "value") def close_btn_click(change): if change["new"]: toolbar_button.value = False if m is not None: m.toolbar_reset() if m.tool_control is not None and m.tool_control in m.controls: m.remove_control(m.tool_control) m.tool_control = None if m.sankee_control is not None and m.sankee_control in m.controls: m.remove_control(m.sankee_control) m.sankee_control = None toolbar_widget.close() close_button.observe(close_btn_click, "value") def button_clicked(change): if change["new"] == "Apply": with output: output.clear_output() plot_output.clear_output() print("Running ...") if m is not None: exclude_classes = [] if "NLCD" in dataset.value: before_img = ee.Image(f"USGS/NLCD/NLCD{before.value}") after_img = ee.Image(f"USGS/NLCD/NLCD{after.value}") vis_params = {} elif "MODIS" in dataset.value: before_img = ee.Image(f"MODIS/006/MCD12Q1/{before.value}_01_01") after_img = ee.Image(f"MODIS/006/MCD12Q1/{after.value}_01_01") vis_params = { "min": 1.0, "max": 17.0, "palette": [ "05450a", "086a10", "54a708", "78d203", "009900", "c6b044", "dcd159", "dade48", "fbff13", "b6ff05", "27ff87", "c24f44", "a5a5a5", "ff6d4c", "69fff8", "f9ffa4", "1c0dff", ], } elif "CGLS" in dataset.value: before_img = ee.Image( f"COPERNICUS/Landcover/100m/Proba-V-C3/Global/{before.value}" ) after_img = ee.Image( f"COPERNICUS/Landcover/100m/Proba-V-C3/Global/{after.value}" ) vis_params = {} elif "LCMS" in dataset.value: before_img = ee.Image( f"USFS/GTAC/LCMS/v2020-5/LCMS_CONUS_v2020-5_{before.value}" ) after_img = ee.Image( f"USFS/GTAC/LCMS/v2020-5/LCMS_CONUS_v2020-5_{after.value}" ) vis_params = {} # LCMS Land Cover class 15 is a no data mask and should be excluded exclude_classes.append(15) img_list = [before_img, after_img] label_list = [before.value, after.value] image1 = 
before_img.select(band_name[dataset.value]) image2 = after_img.select(band_name[dataset.value]) if region.value != "User-drawn ROI" or ( region.value == "User-drawn ROI" and m.user_roi is not None ): if region.value == "User-drawn ROI": geom = m.user_roi image1 = image1.clip(geom) image2 = image2.clip(geom) else: roi_object = m.ee_layer_dict[region.value]["ee_object"] if region.value == "Las Vegas": m.centerObject(roi_object, 10) if isinstance(roi_object, ee.Geometry): geom = roi_object image1 = image1.clip(geom) image2 = image2.clip(geom) else: roi_object = ee.FeatureCollection(roi_object) image1 = image1.clipToCollection(roi_object) image2 = image2.clipToCollection(roi_object) geom = roi_object.geometry() if len(title.value) > 0: plot_title = title.value else: plot_title = None m.default_style = {"cursor": "wait"} plot = sankee.sankify( img_list, geom, label_list, dataset_template[dataset.value], max_classes=classes.value, n=int(samples.value), title=plot_title, exclude=exclude_classes, ) output.clear_output() plot_output.clear_output() with plot_output: plot.update_layout( width=600, height=250, margin=dict(l=10, r=10, b=10, t=50, pad=5), ) plot_widget.children = [ widgets.HBox( [ plot_close_btn, plot_reset_btn, plot_fullscreen_btn, width_btn, width_slider, width_slider_label, height_btn, height_slider, height_slider_label, ] ), plot_output, ] display(plot) m.sankee_plot = plot m.addLayer(image1, vis_params, before.value) m.addLayer(image2, vis_params, after.value) m.default_style = {"cursor": "default"} else: with output: output.clear_output() print("Draw a polygon on the map.") elif change["new"] == "Reset": output.clear_output() plot_output.clear_output() plot_widget.children = [] elif change["new"] == "Close": if m is not None: m.toolbar_reset() if m.tool_control is not None and m.tool_control in m.controls: m.remove_control(m.tool_control) m.tool_control = None if m.sankee_control is not None and m.sankee_control in m.controls: 
                    # --- tail of sankee_gui(); its definition begins before this chunk ---
                    # NOTE(review): nesting reconstructed from the parallel
                    # close-handlers in plot_transect/sankee_gui above — confirm
                    # against upstream geemap source.
                    m.remove_control(m.sankee_control)
                    m.sankee_control = None
            toolbar_widget.close()
        buttons.value = None

    buttons.observe(button_clicked, "value")

    # Open the toolbar by default; attach it to the map if one was supplied,
    # otherwise hand the widget back to the caller.
    toolbar_button.value = True
    if m is not None:
        toolbar_control = ipyleaflet.WidgetControl(
            widget=toolbar_widget, position="topright"
        )
        if toolbar_control not in m.controls:
            m.add_control(toolbar_control)
            m.tool_control = toolbar_control
    else:
        return toolbar_widget


def split_basemaps(
    m, layers_dict=None, left_name=None, right_name=None, width="120px", **kwargs
):
    """Add a split-panel basemap comparison to the map.

    Replaces the map's layers/controls with an ipyleaflet SplitMapControl
    showing two basemaps side by side, plus two dropdowns (top-left /
    top-right) to switch either side and a close button that restores the
    controls and layers captured on entry.

    Args:
        m: The map to modify (must expose .controls, .layers, .clear_controls,
            .add_control — an ipyleaflet/geemap Map).
        layers_dict: Optional mapping of name -> tile layer. Defaults to all
            non-WMS layers from basemap_tiles.
        left_name: Key of the initial left layer (defaults to first key).
        right_name: Key of the initial right layer (defaults to last key).
        width: CSS width of the two dropdowns.
        **kwargs: Accepted but unused in this implementation.
    """
    from .basemaps import basemap_tiles

    # Snapshot current state so the close button can restore it.
    controls = m.controls
    layers = m.layers
    m.layers = [m.layers[0]]
    m.clear_controls()

    add_zoom = True
    add_fullscreen = True

    if layers_dict is None:
        # Default to every non-WMS basemap tile layer.
        layers_dict = {}
        keys = dict(basemap_tiles).keys()
        for key in keys:
            if isinstance(basemap_tiles[key], ipyleaflet.WMSLayer):
                pass
            else:
                layers_dict[key] = basemap_tiles[key]

    keys = list(layers_dict.keys())
    if left_name is None:
        left_name = keys[0]
    if right_name is None:
        right_name = keys[-1]

    left_layer = layers_dict[left_name]
    right_layer = layers_dict[right_name]

    control = ipyleaflet.SplitMapControl(left_layer=left_layer, right_layer=right_layer)
    m.add_control(control)

    # Dropdown selectors for each side of the split.
    left_dropdown = widgets.Dropdown(
        options=keys, value=left_name, layout=widgets.Layout(width=width)
    )

    left_control = ipyleaflet.WidgetControl(widget=left_dropdown, position="topleft")
    m.add_control(left_control)

    right_dropdown = widgets.Dropdown(
        options=keys, value=right_name, layout=widgets.Layout(width=width)
    )

    right_control = ipyleaflet.WidgetControl(widget=right_dropdown, position="topright")
    m.add_control(right_control)

    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        # button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )

    def close_btn_click(change):
        # Restore the controls/layers captured before the split view was built.
        if change["new"]:
            m.controls = controls
            m.layers = layers

    close_button.observe(close_btn_click, "value")
    close_control = ipyleaflet.WidgetControl(
        widget=close_button, position="bottomright"
    )
    m.add_control(close_control)

    # Re-add the standard map controls removed by clear_controls().
    if add_zoom:
        m.add_control(ipyleaflet.ZoomControl())
    if add_fullscreen:
        m.add_control(ipyleaflet.FullScreenControl())
    m.add_control(ipyleaflet.ScaleControl(position="bottomleft"))

    # Locate the SplitMapControl we just added so the dropdown callbacks
    # can swap its layer URLs in place.
    split_control = None
    for ctrl in m.controls:
        if isinstance(ctrl, ipyleaflet.SplitMapControl):
            split_control = ctrl
            break

    def left_change(change):
        split_control.left_layer.url = layers_dict[left_dropdown.value].url

    left_dropdown.observe(left_change, "value")

    def right_change(change):
        split_control.right_layer.url = layers_dict[right_dropdown.value].url

    right_dropdown.observe(right_change, "value")
"""Process fictitious N2 cross sections from LxCAT. """ from nepc.curate.curate import CurateLxCAT, curate_client from nepc.util import config NEPC_HOME = config.nepc_home() datadir = NEPC_HOME + "/tests/data" states = { 'X1': 'N2(X1Sigmag+)', 'B3': 'N2(B3Pig)', 'W3': 'N2(W3Deltau)', 'B\'3': 'N2(Bp3Sigmau-)', 'a\'1': 'N2(ap1Sigmau-)', 'a1': 'N2(a1Pig)', 'w1': 'N2(w1Deltau)', 'C3': 'N2(C3Piu)', 'E3': 'N2(E3Sigmag+)', 'a\'\'1': 'N2(ap1Sigmag+)', 'B2SIGMA': 'N2+(B2Sigmau+)', 'SUM': 'N2(1SUM)_Z-M', 'N2\\^\\+': 'N2+' } augment_dicts = [[{'kind': 'EXCITATION'}, {'process': 'excitation', 'lhs_a': states['X1'], 'models': ['fict']}]] for key in states.keys(): augment_dicts.append([{'product': key}, {'rhs_a': states[key]}]) for i in range(9): augment_dicts.append([{'kind': 'EXCITATION', 'product': f'v{i}', 'background': f'V={i}'}, {'process': 'excitation_v', 'lhs_a': f'{states['X1']}', 'rhs_a': f'{states['X1']}', 'lhs_v': '0', 'rhs_v': f'{i}', 'models': ['fict']}]) augment_dicts = augment_dicts + [ [{'product': 'v1res', 'background': 'V=1'}, {'rhs_a': 'N2(X1Sigmag+)', 'lhs_v': 0, 'rhs_v': 1}], [{'product': 'v0-4', 'background': 'V=0-4'}, {'rhs_a': 'N2(A3Sigmau+)_v0-4', 'lhs_v': '-1', 'rhs_v': '-1', 'process': 'excitation'}], [{'product': 'v5-9', 'background': 'V=5-9'}, {'rhs_a': 'N2(A3Sigmau+)_v5-9', 'lhs_v': '-1', 'rhs_v': '-1', 'process': 'excitation'}], [{'product': 'v10-', 'background': 'V=10-'}, {'rhs_a': 'N2(A3Sigmau+)_v10-', 'lhs_v': '-1', 'rhs_v': '-1', 'process': 'excitation'}], [{'kind': 'IONIZATION', 'product': 'N2\^\+'}, {'process': 'ionization_total', 'lhs_a': states['X1'], 'rhs_a': 'N2+', 'models': ['fict', 'fict_min', 'fict_min2']}], [{'kind': 'EXCITATION', 'product': 'rot', 'background': 'SLAR'}, {'process': 'excitation', 'lhs_a': states['X1'], 'rhs_a': 'N2(X1Sigmag+)_jSLAR', 'models': ['fict', 'fict_min2']}], [{'kind': 'IONIZATION', 'product': 'B2SIGMA'}, {'process': 'ionization', 'lhs_a': states['X1'], 'rhs_a': states['B2SIGMA'], 'models': ['fict']}], 
[{'kind': 'EFFECTIVE'}, {'process': 'total', 'lhs_a': 'N2', 'rhs_a': 'N2', 'models': ['fict', 'fict_min', 'fict_min2']}], [{'kind': 'EXCITATION', 'product': 'rot', 'background': 'SCHULZ'}, {'process': 'excitation', 'lhs_a': states['X1'], 'rhs_a': 'N2(X1Sigmag+)_jSCHULZ', 'models': ['fict']}]] curate_client(CurateLxCAT(), datadir, species='n2', title='fict', units_e='1.0', units_sigma='1.0', augment_dicts=augment_dicts, initialize_nepc=True, test=True)
"""Process fictitious N2 cross sections from LxCAT. """ from nepc.curate.curate import CurateLxCAT, curate_client from nepc.util import config NEPC_HOME = config.nepc_home() datadir = NEPC_HOME + "/tests/data" states = { 'X1': 'N2(X1Sigmag+)', 'B3': 'N2(B3Pig)', 'W3': 'N2(W3Deltau)', 'B\'3': 'N2(Bp3Sigmau-)', 'a\'1': 'N2(ap1Sigmau-)', 'a1': 'N2(a1Pig)', 'w1': 'N2(w1Deltau)', 'C3': 'N2(C3Piu)', 'E3': 'N2(E3Sigmag+)', 'a\'\'1': 'N2(ap1Sigmag+)', 'B2SIGMA': 'N2+(B2Sigmau+)', 'SUM': 'N2(1SUM)_Z-M', 'N2\\^\\+': 'N2+' } augment_dicts = [[{'kind': 'EXCITATION'}, {'process': 'excitation', 'lhs_a': states['X1'], 'models': ['fict']}]] for key in states.keys(): augment_dicts.append([{'product': key}, {'rhs_a': states[key]}]) for i in range(9): augment_dicts.append([{'kind': 'EXCITATION', 'product': f'v{i}', 'background': f'V={i}'}, {'process': 'excitation_v', 'lhs_a': f'{states["X1"]}', 'rhs_a': f'{states["X1"]}', 'lhs_v': '0', 'rhs_v': f'{i}', 'models': ['fict']}]) augment_dicts = augment_dicts + [ [{'product': 'v1res', 'background': 'V=1'}, {'rhs_a': 'N2(X1Sigmag+)', 'lhs_v': 0, 'rhs_v': 1}], [{'product': 'v0-4', 'background': 'V=0-4'}, {'rhs_a': 'N2(A3Sigmau+)_v0-4', 'lhs_v': '-1', 'rhs_v': '-1', 'process': 'excitation'}], [{'product': 'v5-9', 'background': 'V=5-9'}, {'rhs_a': 'N2(A3Sigmau+)_v5-9', 'lhs_v': '-1', 'rhs_v': '-1', 'process': 'excitation'}], [{'product': 'v10-', 'background': 'V=10-'}, {'rhs_a': 'N2(A3Sigmau+)_v10-', 'lhs_v': '-1', 'rhs_v': '-1', 'process': 'excitation'}], [{'kind': 'IONIZATION', 'product': 'N2\^\+'}, {'process': 'ionization_total', 'lhs_a': states['X1'], 'rhs_a': 'N2+', 'models': ['fict', 'fict_min', 'fict_min2']}], [{'kind': 'EXCITATION', 'product': 'rot', 'background': 'SLAR'}, {'process': 'excitation', 'lhs_a': states['X1'], 'rhs_a': 'N2(X1Sigmag+)_jSLAR', 'models': ['fict', 'fict_min2']}], [{'kind': 'IONIZATION', 'product': 'B2SIGMA'}, {'process': 'ionization', 'lhs_a': states['X1'], 'rhs_a': states['B2SIGMA'], 'models': ['fict']}], 
[{'kind': 'EFFECTIVE'}, {'process': 'total', 'lhs_a': 'N2', 'rhs_a': 'N2', 'models': ['fict', 'fict_min', 'fict_min2']}], [{'kind': 'EXCITATION', 'product': 'rot', 'background': 'SCHULZ'}, {'process': 'excitation', 'lhs_a': states['X1'], 'rhs_a': 'N2(X1Sigmag+)_jSCHULZ', 'models': ['fict']}]] curate_client(CurateLxCAT(), datadir, species='n2', title='fict', units_e='1.0', units_sigma='1.0', augment_dicts=augment_dicts, initialize_nepc=True, test=True)
import abc
import collections
import dataclasses
import logging
import os
import subprocess
import tempfile
import traceback
import typing

import version
import yaml

from github3.exceptions import (
    ConnectionError,
    NotFoundError,
)

import gci.componentmodel as cm

import ci.util
from ci.util import (
    existing_file,
    existing_dir,
    not_empty,
    not_none,
)
import cnudie.retrieve
import cnudie.util
import dockerutil

from gitutil import GitHelper
from github.util import (
    GitHubRepositoryHelper,
    GitHubRepoBranch,
)
import product.v2
from github.release_notes.util import (
    delete_file_from_slack,
    fetch_release_notes,
    post_to_slack,
    ReleaseNotes,
)
from concourse.model.traits.release import (
    ReleaseNotesPolicy,
    ReleaseCommitPublishingPolicy,
)
import model.container_registry as cr
import oci.model

logger = logging.getLogger('step.release')


class TransactionContext:
    '''Mutable key/value store shared between the steps of a `Transaction`.

    Each step may deposit its `apply` result under its step name exactly once;
    later steps (and `revert` implementations) look results up by step name.
    '''
    def __init__(self):
        self._step_outputs = {}

    def has_output(self, step_name: str):
        '''Return whether an output was already recorded for `step_name`.'''
        return step_name in self._step_outputs.keys()

    def step_output(self, step_name: str):
        '''Return the recorded output for `step_name` (KeyError if absent).'''
        return self._step_outputs[step_name]

    def set_step_output(self, step_name: str, output):
        '''Record `output` for `step_name`; a step's output may only be set once.'''
        if self.has_output(step_name):
            raise RuntimeError(f"Context already contains output of step '{step_name}'")
        self._step_outputs[step_name] = output


class TransactionalStep(metaclass=abc.ABCMeta):
    '''Abstract base class for operations that are to be executed with transactional
    semantics.

    Instances represent operations which typically cause external and persistent side
    effects. Typically, a sequence of (different) steps are grouped in a `Transaction`

    Subclasses *may* overwrite the `validate` method, which performs optional checks
    that indicate whether the operation would probably fail. Those checks are intended
    to be run for all steps of a `Transaction` before actually executing it. Validation
    *must not* cause any persistent side effects to external resources.

    Subclasses *must* overwrite the `apply` method, which performs the actual payload
    of the step, typically resulting in persistent external side effects. The `apply`
    method *may* also return an object (e.g.: a `dict`) that is then made available to
    later steps when part of a `Transaction`.

    Subclasses *must* overwrite the `revert` method, which reverts any persistent
    external side effects previously created by running the step's `apply` method.
    This should take into account that the execution of the `apply` method may or may
    not have succeeded, failed, or failed partially.
    '''
    def set_context(self, context: TransactionContext):
        self._context = context

    def context(self):
        return self._context

    def validate(self):
        pass

    @abc.abstractmethod
    def apply(self):
        return None

    @abc.abstractmethod
    def revert(self):
        pass

    @abc.abstractmethod
    def name(self):
        pass


class Transaction:
    '''Represents a transaction using `TransactionalStep`s

    After creation, invoke `validate` to have the transaction validate all steps.
    Invoke `execute` to execute all steps. Both operations are done in the original
    step order. Upon encountered errors, all steps that were already executed are
    reverted in inverse execution order.
    '''
    def __init__(
        self,
        ctx: TransactionContext,
        steps: typing.Iterable[TransactionalStep],
    ):
        self._context = ci.util.check_type(ctx, TransactionContext)
        # validate type of args and set context
        for step in steps:
            ci.util.check_type(step, TransactionalStep)
            step.set_context(self._context)
        self._steps = steps

    def validate(self):
        '''Run each step's (side-effect free) `validate`; raises on first failure.'''
        for step in self._steps:
            logger.info(f'validating {step.name()=}')
            step.validate()

    def execute(self):
        '''Apply all steps in order; on failure, revert executed steps and return False.'''
        executed_steps = list()
        for step in self._steps:
            step_name = step.name()
            logger.info(f'executing {step_name=}')
            executed_steps.append(step)
            try:
                output = step.apply()
                self._context.set_step_output(step_name, output)
            except BaseException as e:
                logger.warning(f'An error occured while applying {step_name=} {e=}')
                traceback.print_exc()
                # revert the changes attempted, in reverse order
                self._revert(reversed(executed_steps))
                # do not execute apply for remaining steps
                return False
        return True

    def _revert(self, steps):
        # attempt to revert each step. Raise an exception if not all reverts succeeded.
        all_reverted = True
        for step in steps:
            step_name = step.name()
            logger.info(f'reverting {step_name=}')
            try:
                step.revert()
            except BaseException as e:
                all_reverted = False
                logger.warning(f'An error occured while reverting step {step_name=}: {e=}')
                traceback.print_exc()
        if not all_reverted:
            raise RuntimeError('Unable to revert all steps.')


class RebaseStep(TransactionalStep):
    '''Rebases the local worktree onto the current remote head of the release branch.'''
    def __init__(self, git_helper: GitHelper, repository_branch: str):
        self.git_helper = not_none(git_helper)
        self.repository_branch = not_empty(repository_branch)

    def name(self):
        return f'Rebase against {self.repository_branch}'

    def apply(self):
        upstream_commit_sha = self.git_helper.fetch_head(
            f'refs/heads/{self.repository_branch}'
        ).hexsha
        self.git_helper.rebase(commit_ish=upstream_commit_sha)

    def revert(self):
        # a rebase has no persistent external side effects - nothing to revert
        pass


class ReleaseCommitStep(TransactionalStep):
    '''Creates the release commit (writing the release version into the version file)
    and - depending on the publishing policy - pushes it to the release branch.
    '''
    def __init__(
        self,
        git_helper: GitHelper,
        repo_dir: str,
        release_version: str,
        repository_version_file_path: str,
        repository_branch: str,
        release_commit_message_prefix: str,
        publishing_policy: ReleaseCommitPublishingPolicy,
        release_commit_callback_image_reference: str,
        release_commit_callback: str=None,
    ):
        self.git_helper = not_none(git_helper)
        self.repository_branch = not_empty(repository_branch)
        self.repo_dir = os.path.abspath(repo_dir)
        self.release_version = not_empty(release_version)
        self.repository_version_file_path = os.path.join(
            self.repo_dir,
            repository_version_file_path,
        )
        self.release_commit_message_prefix = release_commit_message_prefix
        self.publishing_policy = publishing_policy
        self.release_commit_callback_image_reference = release_commit_callback_image_reference

        self.release_commit_callback = release_commit_callback

        self.head_commit = None  # stored while applying - used for revert

    def _release_commit_message(self, version: str, release_commit_message_prefix: str=''):
        message = f'Release {version}'
        if release_commit_message_prefix:
            return f'{release_commit_message_prefix} {message}'
        else:
            return message

    def name(self):
        return 'Create Release Commit'

    def validate(self):
        existing_dir(self.repo_dir)
        version.parse_to_semver(self.release_version)
        if(self.release_commit_callback):
            existing_file(
                os.path.join(
                    self.repo_dir,
                    self.release_commit_callback,
                )
            )
        existing_file(self.repository_version_file_path)

    def apply(self):
        # clean repository if required
        worktree_dirty = bool(self.git_helper._changed_file_paths())
        if worktree_dirty:
            self.git_helper.repo.head.reset(working_tree=True)

        # store head-commit (type: git.Commit)
        self.head_commit = self.git_helper.repo.head.commit
        self.context().head_commit = self.head_commit  # pass to other steps

        # prepare release commit
        with open(self.repository_version_file_path, 'w') as f:
            f.write(self.release_version)

        # call optional release commit callback
        if self.release_commit_callback:
            _invoke_callback(
                callback_script_path=self.release_commit_callback,
                repo_dir=self.repo_dir,
                effective_version=self.release_version,
                callback_image_reference=self.release_commit_callback_image_reference,
            )

        release_commit = self.git_helper.index_to_commit(
            message=self._release_commit_message(
                self.release_version,
                self.release_commit_message_prefix
            ),
        )

        self.context().release_commit = release_commit  # pass to other steps

        if self.publishing_policy is ReleaseCommitPublishingPolicy.TAG_AND_PUSH_TO_BRANCH:
            # push commit to remote
            self.git_helper.push(
                from_ref=release_commit.hexsha,
                to_ref=self.repository_branch
            )
        elif self.publishing_policy is ReleaseCommitPublishingPolicy.TAG_ONLY:
            # handled when creating all release tags
            pass
        else:
            raise NotImplementedError

        return {
            'release_commit_sha1': release_commit.hexsha,
        }

    def revert(self):
        if not self.context().has_output(self.name()):
            # push unsuccessful, nothing to do
            return
        else:
            if self.publishing_policy is ReleaseCommitPublishingPolicy.TAG_AND_PUSH_TO_BRANCH:
                output = self.context().step_output(self.name())
                # create revert commit for the release commit and push it, but first
                # clean repository if required
                worktree_dirty = bool(self.git_helper._changed_file_paths())
                if worktree_dirty:
                    self.git_helper.repo.head.reset(working_tree=True)
                self.git_helper.repo.git.revert(
                    output['release_commit_sha1'],
                    no_edit=True,
                    no_commit=True,
                )
                release_revert_commit = _add_all_and_create_commit(
                    git_helper=self.git_helper,
                    message=f"Revert '{self._release_commit_message(self.release_version)}'"
                )
                self.git_helper.push(
                    from_ref=release_revert_commit.hexsha,
                    to_ref=self.repository_branch,
                )
            elif self.publishing_policy is ReleaseCommitPublishingPolicy.TAG_ONLY:
                # is handled in the step that creates the tags
                return
            else:
                raise NotImplementedError


class CreateTagsStep(TransactionalStep):
    '''Renders the configured tag templates and pushes the release commit to each tag ref.'''
    def __init__(
        self,
        author_email,
        author_name,
        github_release_tag,
        git_tags,
        github_helper,
        git_helper,
        release_version,
        publishing_policy: ReleaseCommitPublishingPolicy
    ):
        self.github_helper = github_helper
        self.git_helper = git_helper
        self.author_name = author_name
        self.author_email = author_email
        self.publishing_policy = publishing_policy
        self.release_version = release_version
        # initialise eagerly so `revert` is safe even if `apply` failed before
        # reaching its own assignment
        self.tags_created = []

        tag_template_vars = {'VERSION': self.release_version}

        # render tag-templates
        self.github_release_tag = github_release_tag['ref_template'].format(
            **tag_template_vars
        )
        self.git_tags = [
            tag_template['ref_template'].format(**tag_template_vars)
            for tag_template in git_tags
        ]

    def name(self):
        return 'Create Tags'

    def validate(self):
        tags_to_set = [self.github_release_tag] + self.git_tags
        _, existing_tags = self.git_helper.check_tag_availability(tags_to_set)
        if(existing_tags):
            # NOTE: inner quotes must differ from the f-string's quotes; with
            # identical quotes this line is a SyntaxError on Python < 3.12
            ci.util.fail(
                'Cannot create the following tags as they already exist in the '
                f'repository: {", ".join(existing_tags)}'
            )

    def apply(self):
        release_commit_step_output = self.context().step_output('Create Release Commit')
        release_commit_sha = release_commit_step_output['release_commit_sha1']

        # depending on the publishing policy either push the release commit to all tag-refs or
        # create tags pointing to the commit on the release-branch
        self.tags_created = []
        if self.publishing_policy in [
            ReleaseCommitPublishingPolicy.TAG_ONLY,
            ReleaseCommitPublishingPolicy.TAG_AND_PUSH_TO_BRANCH,
        ]:
            def _push_tag(tag):
                self.git_helper.push(
                    from_ref=release_commit_sha,
                    to_ref=tag,
                )
                self.tags_created.append(tag)

            for tag in [self.github_release_tag] + self.git_tags:
                _push_tag(tag)
        else:
            raise NotImplementedError

        return {
            'release_tag': self.github_release_tag,
            'tags': self.git_tags,
        }

    def revert(self):
        for tag in self.tags_created:
            if self.publishing_policy in [
                ReleaseCommitPublishingPolicy.TAG_ONLY,
                ReleaseCommitPublishingPolicy.TAG_AND_PUSH_TO_BRANCH,
            ]:
                # pushing an empty from-ref deletes the remote tag
                self.git_helper.push(
                    from_ref='',
                    to_ref=tag,
                )
            else:
                raise NotImplementedError


class NextDevCycleCommitStep(TransactionalStep):
    '''Bumps the version file to the next development-cycle version and pushes the commit.'''
    def __init__(
        self,
        git_helper: GitHelper,
        repo_dir: str,
        release_version: str,
        repository_version_file_path: str,
        repository_branch: str,
        version_operation: str,
        prerelease_suffix: str,
        publishing_policy: ReleaseCommitPublishingPolicy,
        next_cycle_commit_message_prefix: str=None,
        next_version_callback: str=None,
    ):
        self.git_helper = not_none(git_helper)
        self.repository_branch = not_empty(repository_branch)
        self.repo_dir = os.path.abspath(repo_dir)
        self.release_version = not_empty(release_version)
        self.version_operation = not_empty(version_operation)
        self.prerelease_suffix = not_empty(prerelease_suffix)
        self.publishing_policy = publishing_policy
        self.next_cycle_commit_message_prefix = next_cycle_commit_message_prefix
        self.repository_version_file_path = os.path.join(
            self.repo_dir,
            repository_version_file_path,
        )
        self.next_version_callback = next_version_callback

    def _next_dev_cycle_commit_message(self, version: str, message_prefix: str):
        message = f'Prepare next Dev Cycle {version}'
        if message_prefix:
            message = f'{message_prefix} {message}'
        return message

    def name(self):
        return 'Create next development cycle commit'

    def validate(self):
        existing_dir(self.repo_dir)
        version.parse_to_semver(self.release_version)
        if self.next_version_callback:
            existing_file(
                os.path.join(
                    self.repo_dir,
                    self.next_version_callback,
                )
            )
        existing_file(self.repository_version_file_path)

        # perform version ops once to validate args
        _calculate_next_cycle_dev_version(
            release_version=self.release_version,
            version_operation=self.version_operation,
            prerelease_suffix=self.prerelease_suffix,
        )

    def apply(self):
        # clean repository if required
        worktree_dirty = bool(self.git_helper._changed_file_paths())
        if worktree_dirty:
            if self.publishing_policy is ReleaseCommitPublishingPolicy.TAG_AND_PUSH_TO_BRANCH:
                reset_to = self.context().release_commit
            elif self.publishing_policy is ReleaseCommitPublishingPolicy.TAG_ONLY:
                reset_to = 'HEAD'
            else:
                raise NotImplementedError
            self.git_helper.repo.head.reset(
                commit=reset_to,
                index=True,
                working_tree=True,
            )

        # prepare next dev cycle commit
        next_version = _calculate_next_cycle_dev_version(
            release_version=self.release_version,
            version_operation=self.version_operation,
            prerelease_suffix=self.prerelease_suffix,
        )
        logger.info(f'{next_version=}')

        with open(self.repository_version_file_path, 'w') as f:
            f.write(next_version)

        # call optional dev cycle commit callback
        if self.next_version_callback:
            _invoke_callback(
                callback_script_path=self.next_version_callback,
                repo_dir=self.repo_dir,
                effective_version=next_version,
            )

        # depending on publishing-policy, bump-commit should become successor of
        # either the release commit, or just be pushed to branch-head
        if self.publishing_policy is ReleaseCommitPublishingPolicy.TAG_AND_PUSH_TO_BRANCH:
            parent_commits = [self.context().release_commit]
        elif self.publishing_policy is ReleaseCommitPublishingPolicy.TAG_ONLY:
            parent_commits = None  # default to current branch head

        next_cycle_commit = self.git_helper.index_to_commit(
            message=self._next_dev_cycle_commit_message(
                version=next_version,
                message_prefix=self.next_cycle_commit_message_prefix,
            ),
            parent_commits=parent_commits,
        )

        # Push commit to remote
        self.git_helper.push(
            from_ref=next_cycle_commit.hexsha,
            to_ref=self.repository_branch,
        )
        return {
            'next cycle commit sha': next_cycle_commit.hexsha,
        }

    def revert(self):
        if not self.context().has_output(self.name()):
            # push unsuccessful, nothing to do
            return
        else:
            output = self.context().step_output(self.name())
            # create revert commit for the next dev cycle commit and push it, but first
            # clean repository if required
            worktree_dirty = bool(self.git_helper._changed_file_paths())
            if worktree_dirty:
                self.git_helper.repo.head.reset(working_tree=True)
            next_cycle_dev_version = _calculate_next_cycle_dev_version(
                release_version=self.release_version,
                version_operation=self.version_operation,
                prerelease_suffix=self.prerelease_suffix,
            )
            commit_message = self._next_dev_cycle_commit_message(
                version=next_cycle_dev_version,
                message_prefix=self.next_cycle_commit_message_prefix,
            )
            self.git_helper.repo.git.revert(
                output['next cycle commit sha'],
                no_edit=True,
                no_commit=True,
            )
            next_cycle_revert_commit = _add_all_and_create_commit(
                git_helper=self.git_helper,
                message=f"Revert '{commit_message}'"
            )
            self.git_helper.push(
                from_ref=next_cycle_revert_commit.hexsha,
                to_ref=self.repository_branch,
            )


class GitHubReleaseStep(TransactionalStep):
    '''Creates the GitHub release (promoting a matching draft release if one exists).'''
    def __init__(
        self,
        github_helper: GitHubRepositoryHelper,
        githubrepobranch: GitHubRepoBranch,
        repo_dir: str,
        component_name: str,
        release_version: str,
    ):
        self.github_helper = not_none(github_helper)
        self.githubrepobranch = githubrepobranch
        self.release_version = not_empty(release_version)
        self.repo_dir = repo_dir
        self.component_name = component_name

    def name(self):
        return "Create Release"

    def validate(self):
        version.parse_to_semver(self.release_version)

    def apply(self):
        create_tags_step_output = self.context().step_output('Create Tags')
        release_tag = create_tags_step_output['release_tag']

        # github3.py expects the tags's name, not the whole ref
        if release_tag.startswith('refs/tags/'):
            release_tag = release_tag[10:]
        else:
            raise RuntimeError(
                f'unexpected {release_tag=}. Expected a ref, e.g. `refs/tags/foo`'
            )

        # Create GitHub-release
        if release := self.github_helper.draft_release_with_name(f'{self.release_version}-draft'):
            self.github_helper.promote_draft_release(
                draft_release=release,
                release_tag=release_tag,
                release_version=self.release_version,
                component_name=self.component_name,
            )
        else:
            release = self.github_helper.create_release(
                tag_name=release_tag,
                body="",
                draft=False,
                prerelease=False,
                name=self.release_version,
                component_name=self.component_name,
            )

        return {
            'release_tag_name': release_tag,
        }

    def revert(self):
        # Fetch release
        # NOTE(review): lookup uses the release version, while `apply` created the
        # release under the rendered tag name - verify these always coincide
        try:
            release = self.github_helper.repository.release_from_tag(self.release_version)
        except NotFoundError:
            release = None
        if release:
            logger.info(f'Deleting {self.release_version=}')
            if not release.delete():
                raise RuntimeError("Release could not be deleted")


class UploadComponentDescriptorStep(TransactionalStep):
    '''Publishes the CNUDIE component descriptor(s) to the OCI registry and attaches
    them to the GitHub release as assets.
    '''
    def __init__(
        self,
        github_helper: GitHubRepositoryHelper,
        component_descriptor_v2_path: str,
        ctf_path: str,
    ):
        self.github_helper = not_none(github_helper)
        self.component_descriptor_v2_path = component_descriptor_v2_path
        self.ctf_path = ctf_path

    def name(self):
        return "Upload Component Descriptor"

    def validate(self):
        # either cds _XOR_ ctf must exist
        have_ctf = os.path.exists(self.ctf_path)
        have_cd = os.path.exists(self.component_descriptor_v2_path)
        if not have_ctf ^ have_cd:
            ci.util.fail('exactly one of component-descriptor, or ctf-archive must exist')
        elif have_cd:
            self.components = [cm.ComponentDescriptor.from_dict(
                component_descriptor_dict=ci.util.parse_yaml_file(
                    self.component_descriptor_v2_path
                ),
                validation_mode=cm.ValidationMode.FAIL,
            )]
        elif have_ctf:
            self.components = tuple(cnudie.util.component_descriptors_from_ctf_archive(
                self.ctf_path,
            ))
            if not self.components:
                ci.util.fail(f'No component descriptor found in CTF archive at {self.ctf_path=}')
        for component_descriptor_v2 in self.components:
            try:
                # exhaust the generator to force retrieval of all referenced components
                collections.deque(
                    cnudie.retrieve.components(component=component_descriptor_v2),
                    maxlen=0,
                )
            except oci.model.OciImageNotFoundException as e:
                logger.warning(
                    'Error when retrieving the Component Descriptor of a component referenced in '
                    f"this component's Component Descriptor: {e}"
                )

    def apply(self):
        create_release_step_output = self.context().step_output('Create Release')
        release_tag_name = create_release_step_output['release_tag_name']
        if os.path.exists(self.component_descriptor_v2_path):
            component_descriptor_v2 = cm.ComponentDescriptor.from_dict(
                component_descriptor_dict=ci.util.parse_yaml_file(
                    self.component_descriptor_v2_path
                ),
            )
            component = component_descriptor_v2.component
            tgt_ref = product.v2._target_oci_ref(component=component)
            logger.info(f'publishing CNUDIE-Component-Descriptor to {tgt_ref=}')
            product.v2.upload_component_descriptor_v2_to_oci_registry(
                component_descriptor_v2=component_descriptor_v2,
            )
        elif os.path.exists(self.ctf_path):
            logger.info('processing CTF-archive')
            subprocess.run(
                [
                    'component-cli',
                    'ctf',
                    'push',
                    self.ctf_path,
                ],
                check=True,
            )
        try:
            release = self.github_helper.repository.release_from_tag(release_tag_name)
            for component_descriptor_v2 in self.components:
                descriptor_str = yaml.dump(
                    data=dataclasses.asdict(component_descriptor_v2),
                    Dumper=cm.EnumValueYamlDumper,
                )
                normalized_component_name = component_descriptor_v2.component.name.replace('/', '_')
                asset_name = f'{normalized_component_name}.component_descriptor.cnudie.yaml'
                release.upload_asset(
                    content_type='application/x-yaml',
                    name=asset_name,
                    asset=descriptor_str.encode('utf-8'),
                    label=asset_name,
                )
        except ConnectionError:
            # best-effort: the descriptor was already published to the OCI registry
            logger.warning('Unable to attach component-descriptors to release as release-asset.')

    def revert(self):
        pass


class PublishReleaseNotesStep(TransactionalStep):
    '''Collects release notes from the repository and writes them to the GitHub release.'''
    def name(self):
        return "Publish Release Notes"

    def __init__(
        self,
        githubrepobranch: GitHubRepoBranch,
        github_helper: GitHubRepositoryHelper,
        repository_hostname: str,
        repository_path: str,
        component_descriptor_v2_path: str,
        release_version: str,
        ctf_path: str,
        repo_dir: str,
    ):
        self.repository_hostname = repository_hostname
        self.repository_path = repository_path
        self.githubrepobranch = not_none(githubrepobranch)
        self.github_helper = not_none(github_helper)
        self.release_version = not_empty(release_version)
        self.repo_dir = os.path.abspath(not_empty(repo_dir))
        self.component_descriptor_v2_path = component_descriptor_v2_path
        self.ctf_path = ctf_path

    def validate(self):
        version.parse_to_semver(self.release_version)
        existing_dir(self.repo_dir)
        try:
            self.component_descriptor_v2 = cnudie.util.determine_main_component(
                repository_hostname=self.repository_hostname,
                repository_path=self.repository_path,
                component_descriptor_v2_path=self.component_descriptor_v2_path,
                ctf_path=self.ctf_path,
            )
        except ValueError as err:
            ci.util.fail(str(err))

    def apply(self):
        create_release_step_output = self.context().step_output('Create Release')
        release_tag = create_release_step_output['release_tag_name']
        release_notes = fetch_release_notes(
            self.component_descriptor_v2.component,
            repo_dir=self.repo_dir,
            repository_branch=self.githubrepobranch.branch(),
        )
        release_notes_md = release_notes.to_markdown(force_link_generation=True)
        self.github_helper.update_release_notes(
            tag_name=release_tag,
            body=release_notes_md,
            component_name=self.component_descriptor_v2.component.name,
        )
        return {
            'release notes': release_notes,
            'release notes markdown': release_notes_md,
        }

    def revert(self):
        if not self.context().has_output(self.name()):
            # Updating release notes was unsuccessful, nothing to do
            return
        # purge release notes
        # NOTE(review): `apply` updated the notes under the rendered tag name,
        # whereas this uses the release version - verify these coincide
        self.github_helper.update_release_notes(
            tag_name=self.release_version,
            body='',
        )


class TryCleanupDraftReleasesStep(TransactionalStep):
    '''Best-effort removal of outdated draft releases; failures are only logged.'''
    def name(self):
        return "Try to Cleanup Draft Releases"

    def __init__(
        self,
        github_helper: GitHubRepositoryHelper,
    ):
        self.github_helper = not_none(github_helper)

    def validate(self):
        # nothing to validate
        pass

    def apply(self):
        for release, deletion_successful in self.github_helper.delete_outdated_draft_releases():
            if deletion_successful:
                logger.info(f'Deleted {release.name=}')
            else:
                logger.warning(f'Could not delete {release.name=}')
        return

    def revert(self):
        # nothing to revert
        pass


class PostSlackReleaseStep(TransactionalStep):
    '''Posts the release notes to a Slack channel (uploaded as a file).'''
    def name(self):
        return f"Post Slack Release ({self.slack_channel})"

    def __init__(
        self,
        slack_cfg_name: str,
        slack_channel: str,
        release_version: str,
        release_notes: ReleaseNotes,
        githubrepobranch: GitHubRepoBranch,
    ):
        self.slack_cfg_name = not_empty(slack_cfg_name)
        self.slack_channel = not_empty(slack_channel)
        self.release_version = not_empty(release_version)
        self.githubrepobranch = not_none(githubrepobranch)
        self.release_notes = not_none(release_notes)

    def validate(self):
        version.parse_to_semver(self.release_version)

    def apply(self):
        uploaded_file_id = None
        responses = post_to_slack(
            release_notes=self.release_notes,
            github_repository_name=self.githubrepobranch.github_repo_path(),
            slack_cfg_name=self.slack_cfg_name,
            slack_channel=self.slack_channel,
            release_version=self.release_version,
        )
        for response in responses:
            if response and response.get('file', None):
                uploaded_file_id = response.get('file').get('id')
                logger.info(f'uploaded {uploaded_file_id=} to slack')
            else:
                raise RuntimeError('Unable to get file id from Slack response')
        logger.info('successfully posted contents to slack')
        # return the id so that `revert` can delete the uploaded file again
        # (the original returned None, which made `revert` crash on `.get`)
        return {
            'uploaded file id': uploaded_file_id,
        }

    def revert(self):
        if not self.context().has_output(self.name()):
            # Posting the release notes was unsuccessful, nothing to revert
            return
        uploaded_file_id = self.context().step_output(self.name()).get('uploaded file id')
        delete_file_from_slack(
            slack_cfg_name=self.slack_cfg_name,
            file_id=uploaded_file_id,
        )


def _invoke_callback(
    callback_script_path: str,
    repo_dir: str,
    effective_version: str,
    callback_image_reference: str=None,
):
    '''Run a repository-provided callback script, either directly on the host or
    inside the given container image (with the repository mounted).
    '''
    callback_env = os.environ.copy()
    callback_env['EFFECTIVE_VERSION'] = effective_version

    if callback_image_reference:
        repo_dir_in_container = '/mnt/main_repo'
        callback_env['REPO_DIR'] = repo_dir_in_container
    else:
        callback_env['REPO_DIR'] = repo_dir

    if not callback_image_reference:
        callback_script_path = os.path.join(
            repo_dir,
            callback_script_path,
        )
        subprocess.run(
            [callback_script_path],
            check=True,
            env=callback_env,
        )
    else:
        script_path_in_container = os.path.join(
            repo_dir_in_container,
            callback_script_path,
        )

        oci_registry_cfg = cr.find_config(image_reference=callback_image_reference)
        if oci_registry_cfg:
            docker_cfg_dir = tempfile.TemporaryDirectory()
            dockerutil.mk_docker_cfg_dir(
                cfg={'auths': oci_registry_cfg.as_docker_auths()},
                cfg_dir=docker_cfg_dir.name,
                exist_ok=True,
            )
        else:
            docker_cfg_dir = None

        docker_argv = dockerutil.docker_run_argv(
            image_reference=callback_image_reference,
            argv=(script_path_in_container,),
            env=callback_env,
            mounts={
                repo_dir: repo_dir_in_container,
            },
            # guard against docker_cfg_dir being None (no registry cfg found);
            # the original unconditionally accessed `.name` -> AttributeError
            cfg_dir=docker_cfg_dir.name if docker_cfg_dir else None,
        )

        dockerutil.launch_dockerd_if_not_running()

        logger.info(f'will run callback using {docker_argv=}')

        try:
            subprocess.run(
                docker_argv,
                check=True,
            )
        finally:
            if docker_cfg_dir:
                docker_cfg_dir.cleanup()


def _add_all_and_create_commit(git_helper: GitHelper, message: str):
    '''Commit the current index state with `message` and reset HEAD to it.'''
    commit = git_helper.index_to_commit(
        message=message,
    )
    git_helper.repo.head.reset(
        commit=commit,
        working_tree=True,
    )
    return commit


def _calculate_next_cycle_dev_version(
    release_version: str,
    version_operation: str,
    prerelease_suffix: str,
):
    # calculate the next version and append the prerelease suffix
    return version.process_version(
        version_str=version.process_version(
            version_str=release_version,
            operation=version_operation,
        ),
        operation='set_prerelease',
        prerelease=prerelease_suffix,
    )


def release_and_prepare_next_dev_cycle(
    component_name: str,
    githubrepobranch: GitHubRepoBranch,
    release_commit_publishing_policy: str,
    release_notes_policy: str,
    release_version: str,
    repo_hostname: str,
    repo_path: str,
    repo_dir: str,
    repository_version_file_path: str,
    git_tags: list,
    github_release_tag: dict,
    release_commit_callback_image_reference: str,
    author_email: str="gardener.ci.user@gmail.com",
    author_name: str="gardener-ci",
    component_descriptor_v2_path: str=None,
    ctf_path: str=None,
    next_cycle_commit_message_prefix: str=None,
    next_version_callback: str=None,
    prerelease_suffix: str="dev",
    rebase_before_release: bool=False,
    release_commit_callback: str=None,
    release_commit_message_prefix: str=None,
    # immutable default (the original used a mutable `[]` default argument);
    # callers see identical truthiness/iteration behaviour
    slack_channel_configs: typing.Iterable[dict]=(),
    version_operation: str="bump_minor",
):
    '''Orchestrates the full release: release commit, tags, GitHub release,
    component-descriptor upload, next-dev-cycle commit, release notes and
    optional Slack announcements - each group run as a `Transaction`.
    '''
    transaction_ctx = TransactionContext()  # shared between all steps/trxs

    release_notes_policy = ReleaseNotesPolicy(release_notes_policy)
    release_commit_publishing_policy = ReleaseCommitPublishingPolicy(
        release_commit_publishing_policy
    )
    github_helper = GitHubRepositoryHelper.from_githubrepobranch(githubrepobranch)
    git_helper = GitHelper.from_githubrepobranch(
        githubrepobranch=githubrepobranch,
        repo_path=repo_dir,
    )

    step_list = []

    if rebase_before_release:
        rebase_step = RebaseStep(
            git_helper=git_helper,
            repository_branch=githubrepobranch.branch(),
        )
        step_list.append(rebase_step)

    release_commit_step = ReleaseCommitStep(
        git_helper=git_helper,
        repo_dir=repo_dir,
        release_version=release_version,
        repository_version_file_path=repository_version_file_path,
        repository_branch=githubrepobranch.branch(),
        release_commit_message_prefix=release_commit_message_prefix,
        release_commit_callback=release_commit_callback,
        release_commit_callback_image_reference=release_commit_callback_image_reference,
        publishing_policy=release_commit_publishing_policy,
    )
    step_list.append(release_commit_step)

    create_tag_step = CreateTagsStep(
        author_email=author_email,
        author_name=author_name,
        git_tags=git_tags,
        github_release_tag=github_release_tag,
        git_helper=git_helper,
        github_helper=github_helper,
        release_version=release_version,
        publishing_policy=release_commit_publishing_policy,
    )
    step_list.append(create_tag_step)

    if version_operation != version.NOOP:
        next_cycle_commit_step = NextDevCycleCommitStep(
            git_helper=git_helper,
            repo_dir=repo_dir,
            release_version=release_version,
            repository_version_file_path=repository_version_file_path,
            repository_branch=githubrepobranch.branch(),
            version_operation=version_operation,
            prerelease_suffix=prerelease_suffix,
            next_version_callback=next_version_callback,
            publishing_policy=release_commit_publishing_policy,
            next_cycle_commit_message_prefix=next_cycle_commit_message_prefix,
        )
        step_list.append(next_cycle_commit_step)

    github_release_step = GitHubReleaseStep(
        github_helper=github_helper,
        githubrepobranch=githubrepobranch,
        repo_dir=repo_dir,
        component_name=component_name,
        release_version=release_version,
    )
    step_list.append(github_release_step)

    upload_component_descriptor_step = UploadComponentDescriptorStep(
        github_helper=github_helper,
        component_descriptor_v2_path=component_descriptor_v2_path,
        ctf_path=ctf_path,
    )
    step_list.append(upload_component_descriptor_step)

    release_transaction = Transaction(
        ctx=transaction_ctx,
        steps=step_list,
    )
    release_transaction.validate()
    if not release_transaction.execute():
        raise RuntimeError('An error occurred while creating the Release.')

    publish_release_notes_step = PublishReleaseNotesStep(
        githubrepobranch=githubrepobranch,
        github_helper=github_helper,
        repository_hostname=repo_hostname,
        repository_path=repo_path,
        release_version=release_version,
        component_descriptor_v2_path=component_descriptor_v2_path,
        ctf_path=ctf_path,
        repo_dir=repo_dir,
    )

    cleanup_draft_releases_step = TryCleanupDraftReleasesStep(
        github_helper=github_helper,
    )

    cleanup_draft_releases_transaction = Transaction(
        ctx=transaction_ctx,
        steps=(cleanup_draft_releases_step,),
    )

    if not cleanup_draft_releases_transaction.execute():
        logger.warning('An error occured while cleaning up draft releases')

    if release_notes_policy == ReleaseNotesPolicy.DISABLED:
        return logger.info('release notes were disabled - skipping')
    elif release_notes_policy == ReleaseNotesPolicy.DEFAULT:
        pass
    else:
        raise NotImplementedError(release_notes_policy)

    release_notes_transaction = Transaction(
        ctx=transaction_ctx,
        steps=(publish_release_notes_step,),
    )
    release_notes_transaction.validate()
    if not release_notes_transaction.execute():
        raise RuntimeError('An error occurred while publishing the release notes.')

    if slack_channel_configs:
        release_notes = transaction_ctx.step_output(
            publish_release_notes_step.name()
        ).get('release notes')
        all_slack_releases_successful = True
        for slack_cfg in slack_channel_configs:
            slack_cfg_name = slack_cfg['slack_cfg_name']
            slack_channel = slack_cfg['channel_name']
            post_to_slack_step = PostSlackReleaseStep(
                slack_cfg_name=slack_cfg_name,
                slack_channel=slack_channel,
                release_version=release_version,
                release_notes=release_notes,
                githubrepobranch=githubrepobranch,
            )
            slack_transaction = Transaction(
                ctx=transaction_ctx,
                steps=(post_to_slack_step,),
            )
            slack_transaction.validate()
            all_slack_releases_successful = (
                all_slack_releases_successful and slack_transaction.execute()
            )
        if not all_slack_releases_successful:
            raise RuntimeError('An error occurred while posting the release notes to Slack.')
'''Transactional steps for performing a GitHub release and preparing the next dev cycle.

Each step implements `TransactionalStep` (validate / apply / revert) and is grouped
into `Transaction`s by `release_and_prepare_next_dev_cycle` at the bottom of this module.
Steps communicate through a shared `TransactionContext` keyed by step name.
'''
import abc
import collections
import dataclasses
import logging
import os
import subprocess
import tempfile
import traceback
import typing

import version
import yaml

from github3.exceptions import (
    ConnectionError,
    NotFoundError,
)

import gci.componentmodel as cm

import ci.util
from ci.util import (
    existing_file,
    existing_dir,
    not_empty,
    not_none,
)
import cnudie.retrieve
import cnudie.util
import dockerutil

from gitutil import GitHelper
from github.util import (
    GitHubRepositoryHelper,
    GitHubRepoBranch,
)
import product.v2
from github.release_notes.util import (
    delete_file_from_slack,
    fetch_release_notes,
    post_to_slack,
    ReleaseNotes,
)
from concourse.model.traits.release import (
    ReleaseNotesPolicy,
    ReleaseCommitPublishingPolicy,
)
import model.container_registry as cr
import oci.model

logger = logging.getLogger('step.release')


class TransactionContext:
    '''Mutable key/value store shared by all steps of (possibly several) transactions.

    Outputs are keyed by step name; writing the same key twice is an error so that
    a step cannot silently clobber another step's result.
    '''
    def __init__(self):
        # maps step name -> whatever the step's apply() returned
        self._step_outputs = {}

    def has_output(self, step_name: str):
        # True iff a step of the given name already stored its output
        return step_name in self._step_outputs.keys()

    def step_output(self, step_name: str):
        # raises KeyError if the named step did not (yet) store an output
        return self._step_outputs[step_name]

    def set_step_output(self, step_name: str, output):
        # refuse duplicate writes - each step name may publish its output only once
        if self.has_output(step_name):
            raise RuntimeError(f"Context already contains output of step '{step_name}'")
        self._step_outputs[step_name] = output


class TransactionalStep(metaclass=abc.ABCMeta):
    '''Abstract base class for operations that are to be executed with transactional semantics.

    Instances represent operations which typically cause external and persistent side effects.
    Typically, a sequence of (different) steps are grouped in a `Transaction`

    Subclasses *may* overwrite the `validate` method, which performs optional checks
    that indicate whether the operation would probably fail. Those checks are intended to be
    run for all steps of a `Transaction` before actually executing it. Validation *must not*
    cause any persistent side effects to external resources.

    Subclasses *must* overwrite the `apply` method, which performs the actual payload of the step,
    typically resulting in persistent external side effects. The `apply` method *may* also return
    an object (e.g.: a `dict`) that is then made available to later steps
    when part of a `Transaction`.

    Subclasses *must* overwrite the `revert` method, which reverts any persistent external
    side effects previously created by running the step's `apply` method. This should take into
    account that the execution of the `apply` method may or may not have succeeded, failed,
    or failed partially.
    '''
    def set_context(self, context: TransactionContext):
        # called by the owning Transaction before validate/apply/revert
        self._context = context

    def context(self):
        # the shared TransactionContext (only valid after set_context was called)
        return self._context

    def validate(self):
        pass

    @abc.abstractmethod
    def apply(self):
        return None

    @abc.abstractmethod
    def revert(self):
        pass

    @abc.abstractmethod
    def name(self):
        pass


class Transaction:
    '''Represents a transaction using `TransactionalStep`s

    After creation, invoke `validate` to have the transaction validate all steps. Invoke
    `execute` to execute all steps. Both operations are done in the original step order.
    Upon encountered errors, all steps that were already executed are reverted in
    inverse execution order.
    '''
    def __init__(
        self,
        ctx: TransactionContext,
        steps: typing.Iterable[TransactionalStep],
    ):
        self._context = ci.util.check_type(ctx, TransactionContext)
        # validate type of args and set context
        for step in steps:
            ci.util.check_type(step, TransactionalStep)
            step.set_context(self._context)
        self._steps = steps

    def validate(self):
        # run all (side-effect free) validations before any step is applied
        for step in self._steps:
            logger.info(f'validating {step.name()=}')
            step.validate()

    def execute(self):
        '''Apply all steps in order; on failure revert the already-applied ones.

        Returns True if all steps succeeded, False otherwise (after reverting).
        '''
        executed_steps = list()
        for step in self._steps:
            step_name = step.name()
            logger.info(f'executing {step_name=}')
            # append before apply(): a partially-applied step must also be reverted
            executed_steps.append(step)
            try:
                output = step.apply()
                self._context.set_step_output(step_name, output)
            except BaseException as e:
                logger.warning(f'An error occured while applying {step_name=} {e=}')
                traceback.print_exc()
                # revert the changes attempted, in reverse order
                self._revert(reversed(executed_steps))
                # do not execute apply for remaining steps
                return False
        return True

    def _revert(self, steps):
        # attempt to revert each step. Raise an exception if not all reverts succeeded.
        all_reverted = True
        for step in steps:
            step_name = step.name()
            logger.info(f'reverting {step_name=}')
            try:
                step.revert()
            except BaseException as e:
                # keep reverting remaining steps even if one revert fails
                all_reverted = False
                logger.warning(f'An error occured while reverting step {step_name=}: {e=}')
                traceback.print_exc()
        if not all_reverted:
            raise RuntimeError('Unable to revert all steps.')


class RebaseStep(TransactionalStep):
    '''Rebases the local worktree onto the current remote head of the given branch.'''
    def __init__(self, git_helper: GitHelper, repository_branch: str):
        self.git_helper = not_none(git_helper)
        self.repository_branch = not_empty(repository_branch)

    def name(self):
        return f'Rebase against {self.repository_branch}'

    def apply(self):
        upstream_commit_sha = self.git_helper.fetch_head(
            f'refs/heads/{self.repository_branch}'
        ).hexsha
        self.git_helper.rebase(commit_ish=upstream_commit_sha)

    def revert(self):
        # a rebase only mutates the local worktree - nothing persistent to undo
        pass


class ReleaseCommitStep(TransactionalStep):
    '''Creates the release commit (version file bump + optional callback).

    Depending on the publishing policy, the commit is either pushed to the release
    branch immediately (TAG_AND_PUSH_TO_BRANCH) or only published later via tags
    (TAG_ONLY, handled by `CreateTagsStep`).
    '''
    def __init__(
        self,
        git_helper: GitHelper,
        repo_dir: str,
        release_version: str,
        repository_version_file_path: str,
        repository_branch: str,
        release_commit_message_prefix: str,
        publishing_policy: ReleaseCommitPublishingPolicy,
        release_commit_callback_image_reference: str,
        release_commit_callback: str=None,
    ):
        self.git_helper = not_none(git_helper)
        self.repository_branch = not_empty(repository_branch)
        self.repo_dir = os.path.abspath(repo_dir)
        self.release_version = not_empty(release_version)
        # version file path is interpreted relative to the repository root
        self.repository_version_file_path = os.path.join(
            self.repo_dir,
            repository_version_file_path,
        )
        self.release_commit_message_prefix = release_commit_message_prefix
        self.publishing_policy = publishing_policy
        self.release_commit_callback_image_reference = release_commit_callback_image_reference
        self.release_commit_callback = release_commit_callback

        self.head_commit = None # stored while applying - used for revert

    def _release_commit_message(self, version: str, release_commit_message_prefix: str=''):
        message = f'Release {version}'
        if release_commit_message_prefix:
            return f'{release_commit_message_prefix} {message}'
        else:
            return message

    def name(self):
        return 'Create Release Commit'

    def validate(self):
        existing_dir(self.repo_dir)
        version.parse_to_semver(self.release_version)
        if(self.release_commit_callback):
            existing_file(
                os.path.join(
                    self.repo_dir,
                    self.release_commit_callback,
                )
            )

        existing_file(self.repository_version_file_path)

    def apply(self):
        # clean repository if required
        worktree_dirty = bool(self.git_helper._changed_file_paths())
        if worktree_dirty:
            self.git_helper.repo.head.reset(working_tree=True)

        # store head-commit (type: git.Commit)
        self.head_commit = self.git_helper.repo.head.commit
        self.context().head_commit = self.head_commit # pass to other steps

        # prepare release commit
        with open(self.repository_version_file_path, 'w') as f:
            f.write(self.release_version)

        # call optional release commit callback
        if self.release_commit_callback:
            _invoke_callback(
                callback_script_path=self.release_commit_callback,
                repo_dir=self.repo_dir,
                effective_version=self.release_version,
                callback_image_reference=self.release_commit_callback_image_reference,
            )

        release_commit = self.git_helper.index_to_commit(
            message=self._release_commit_message(
                self.release_version,
                self.release_commit_message_prefix
            ),
        )

        self.context().release_commit = release_commit # pass to other steps

        if self.publishing_policy is ReleaseCommitPublishingPolicy.TAG_AND_PUSH_TO_BRANCH:
            # push commit to remote
            self.git_helper.push(
                from_ref=release_commit.hexsha,
                to_ref=self.repository_branch
            )
        elif self.publishing_policy is ReleaseCommitPublishingPolicy.TAG_ONLY:
            # handled when creating all release tags
            pass
        else:
            raise NotImplementedError

        return {
            'release_commit_sha1': release_commit.hexsha,
        }

    def revert(self):
        if not self.context().has_output(self.name()):
            # push unsuccessful, nothing to do
            return
        else:
            if self.publishing_policy is ReleaseCommitPublishingPolicy.TAG_AND_PUSH_TO_BRANCH:
                output = self.context().step_output(self.name())
                # create revert commit for the release commit and push it, but first
                # clean repository if required
                worktree_dirty = bool(self.git_helper._changed_file_paths())
                if worktree_dirty:
                    self.git_helper.repo.head.reset(working_tree=True)
                self.git_helper.repo.git.revert(
                    output['release_commit_sha1'],
                    no_edit=True,
                    no_commit=True,
                )
                release_revert_commit = _add_all_and_create_commit(
                    git_helper=self.git_helper,
                    message=f"Revert '{self._release_commit_message(self.release_version)}'"
                )
                self.git_helper.push(
                    from_ref=release_revert_commit.hexsha,
                    to_ref=self.repository_branch,
                )
            elif self.publishing_policy is ReleaseCommitPublishingPolicy.TAG_ONLY:
                # is handled in the step that creates the tags
                return
            else:
                raise NotImplementedError


class CreateTagsStep(TransactionalStep):
    '''Renders tag templates ({VERSION} placeholder) and pushes the release commit to them.'''
    def __init__(
        self,
        author_email,
        author_name,
        github_release_tag,
        git_tags,
        github_helper,
        git_helper,
        release_version,
        publishing_policy: ReleaseCommitPublishingPolicy
    ):
        self.github_helper = github_helper
        self.git_helper = git_helper

        self.author_name = author_name
        self.author_email = author_email

        self.publishing_policy = publishing_policy

        self.release_version = release_version

        tag_template_vars = {'VERSION': self.release_version}

        # render tag-templates
        self.github_release_tag = github_release_tag['ref_template'].format(
            **tag_template_vars
        )
        self.git_tags = [
            tag_template['ref_template'].format(**tag_template_vars)
            for tag_template in git_tags
        ]

    def name(self):
        return 'Create Tags'

    def validate(self):
        # fail early if any of the tags to be created already exists remotely
        tags_to_set = [self.github_release_tag] + self.git_tags
        _, existing_tags = self.git_helper.check_tag_availability(tags_to_set)
        if(existing_tags):
            ci.util.fail(
                'Cannot create the following tags as they already exist in the '
                f'repository: {", ".join(existing_tags)}'
            )

    def apply(
        self,
    ):
        release_commit_step_output = self.context().step_output('Create Release Commit')
        release_commit_sha = release_commit_step_output['release_commit_sha1']

        # depending on the publishing policy either push the release commit to all tag-refs or
        # create tags pointing to the commit on the release-branch
        self.tags_created = []

        if self.publishing_policy in [
            ReleaseCommitPublishingPolicy.TAG_ONLY,
            ReleaseCommitPublishingPolicy.TAG_AND_PUSH_TO_BRANCH,
        ]:
            def _push_tag(tag):
                # track successfully pushed tags so revert() can delete exactly those
                self.git_helper.push(
                    from_ref=release_commit_sha,
                    to_ref=tag,
                )
                self.tags_created.append(tag)

            for tag in [self.github_release_tag] + self.git_tags:
                _push_tag(tag)
        else:
            raise NotImplementedError

        return {
            'release_tag': self.github_release_tag,
            'tags': self.git_tags,
        }

    def revert(self):
        # NOTE(review): self.tags_created is only assigned in apply(); if validate()
        # failed before apply ran, this would raise AttributeError - confirm whether
        # Transaction only reverts steps whose apply() was at least attempted (it does).
        for tag in self.tags_created:
            if self.publishing_policy in [
                ReleaseCommitPublishingPolicy.TAG_ONLY,
                ReleaseCommitPublishingPolicy.TAG_AND_PUSH_TO_BRANCH,
            ]:
                # pushing an empty from_ref deletes the remote ref
                self.git_helper.push(
                    from_ref='',
                    to_ref=tag,
                )
            else:
                raise NotImplementedError


class NextDevCycleCommitStep(TransactionalStep):
    '''Bumps the version file to the next dev-cycle version and pushes the commit.'''
    def __init__(
        self,
        git_helper: GitHelper,
        repo_dir: str,
        release_version: str,
        repository_version_file_path: str,
        repository_branch: str,
        version_operation: str,
        prerelease_suffix: str,
        publishing_policy: ReleaseCommitPublishingPolicy,
        next_cycle_commit_message_prefix: str=None,
        next_version_callback: str=None,
    ):
        self.git_helper = not_none(git_helper)
        self.repository_branch = not_empty(repository_branch)
        self.repo_dir = os.path.abspath(repo_dir)
        self.release_version = not_empty(release_version)
        self.version_operation = not_empty(version_operation)
        self.prerelease_suffix = not_empty(prerelease_suffix)
        self.publishing_policy = publishing_policy
        self.next_cycle_commit_message_prefix = next_cycle_commit_message_prefix

        self.repository_version_file_path = os.path.join(
            self.repo_dir,
            repository_version_file_path,
        )
        self.next_version_callback = next_version_callback

    def _next_dev_cycle_commit_message(self, version: str, message_prefix: str):
        message = f'Prepare next Dev Cycle {version}'
        if message_prefix:
            message = f'{message_prefix} {message}'
        return message

    def name(self):
        return 'Create next development cycle commit'

    def validate(self):
        existing_dir(self.repo_dir)
        version.parse_to_semver(self.release_version)
        if self.next_version_callback:
            existing_file(
                os.path.join(
                    self.repo_dir,
                    self.next_version_callback,
                )
            )
        existing_file(self.repository_version_file_path)

        # perform version ops once to validate args
        _calculate_next_cycle_dev_version(
            release_version=self.release_version,
            version_operation=self.version_operation,
            prerelease_suffix=self.prerelease_suffix,
        )

    def apply(self):
        # clean repository if required
        worktree_dirty = bool(self.git_helper._changed_file_paths())
        if worktree_dirty:
            if self.publishing_policy is ReleaseCommitPublishingPolicy.TAG_AND_PUSH_TO_BRANCH:
                reset_to = self.context().release_commit
            elif self.publishing_policy is ReleaseCommitPublishingPolicy.TAG_ONLY:
                reset_to = 'HEAD'
            else:
                raise NotImplementedError

            self.git_helper.repo.head.reset(
                commit=reset_to,
                index=True,
                working_tree=True,
            )

        # prepare next dev cycle commit
        next_version = _calculate_next_cycle_dev_version(
            release_version=self.release_version,
            version_operation=self.version_operation,
            prerelease_suffix=self.prerelease_suffix,
        )
        logger.info(f'{next_version=}')

        with open(self.repository_version_file_path, 'w') as f:
            f.write(next_version)

        # call optional dev cycle commit callback
        # NOTE(review): unlike the release-commit callback, no
        # callback_image_reference is forwarded here, so the callback always runs
        # on the host - confirm this asymmetry is intended.
        if self.next_version_callback:
            _invoke_callback(
                callback_script_path=self.next_version_callback,
                repo_dir=self.repo_dir,
                effective_version=next_version,
            )

        # depending on publishing-policy, bump-commit should become successor of
        # either the release commit, or just be pushed to branch-head
        if self.publishing_policy is ReleaseCommitPublishingPolicy.TAG_AND_PUSH_TO_BRANCH:
            parent_commits = [self.context().release_commit]
        elif self.publishing_policy is ReleaseCommitPublishingPolicy.TAG_ONLY:
            parent_commits = None # default to current branch head

        next_cycle_commit = self.git_helper.index_to_commit(
            message=self._next_dev_cycle_commit_message(
                version=next_version,
                message_prefix=self.next_cycle_commit_message_prefix,
            ),
            parent_commits=parent_commits,
        )

        # Push commit to remote
        self.git_helper.push(
            from_ref=next_cycle_commit.hexsha,
            to_ref=self.repository_branch,
        )
        return {
            'next cycle commit sha': next_cycle_commit.hexsha,
        }

    def revert(self):
        if not self.context().has_output(self.name()):
            # push unsuccessful, nothing to do
            return
        else:
            output = self.context().step_output(self.name())
            # create revert commit for the next dev cycle commit and push it, but first
            # clean repository if required
            worktree_dirty = bool(self.git_helper._changed_file_paths())
            if worktree_dirty:
                self.git_helper.repo.head.reset(working_tree=True)

            next_cycle_dev_version = _calculate_next_cycle_dev_version(
                release_version=self.release_version,
                version_operation=self.version_operation,
                prerelease_suffix=self.prerelease_suffix,
            )
            commit_message = self._next_dev_cycle_commit_message(
                version=next_cycle_dev_version,
                message_prefix=self.next_cycle_commit_message_prefix,
            )
            self.git_helper.repo.git.revert(
                output['next cycle commit sha'],
                no_edit=True,
                no_commit=True,
            )
            next_cycle_revert_commit = _add_all_and_create_commit(
                git_helper=self.git_helper,
                message=f"Revert '{commit_message}'"
            )
            self.git_helper.push(
                from_ref=next_cycle_revert_commit.hexsha,
                to_ref=self.repository_branch,
            )


class GitHubReleaseStep(TransactionalStep):
    '''Creates the GitHub release for the rendered release tag.

    If a draft release named `<version>-draft` exists, it is promoted; otherwise a
    fresh (non-draft) release is created.
    '''
    def __init__(
        self,
        github_helper: GitHubRepositoryHelper,
        githubrepobranch: GitHubRepoBranch,
        repo_dir: str,
        component_name: str,
        release_version: str,
    ):
        self.github_helper = not_none(github_helper)
        self.githubrepobranch = githubrepobranch
        self.release_version = not_empty(release_version)
        self.repo_dir = repo_dir
        self.component_name = component_name

    def name(self):
        return "Create Release"

    def validate(self):
        version.parse_to_semver(self.release_version)

    def apply(
        self,
    ):
        create_tags_step_output = self.context().step_output('Create Tags')
        release_tag = create_tags_step_output['release_tag']
        # github3.py expects the tags's name, not the whole ref
        if release_tag.startswith('refs/tags/'):
            release_tag = release_tag[10:]
        else:
            raise RuntimeError(
                f'unexpected {release_tag=}. Expected a ref, e.g. `refs/tags/foo`'
            )

        # Create GitHub-release
        if release := self.github_helper.draft_release_with_name(f'{self.release_version}-draft'):
            self.github_helper.promote_draft_release(
                draft_release=release,
                release_tag=release_tag,
                release_version=self.release_version,
                component_name=self.component_name,
            )
        else:
            release = self.github_helper.create_release(
                tag_name=release_tag,
                body="",
                draft=False,
                prerelease=False,
                name=self.release_version,
                component_name=self.component_name,
            )

        return {
            'release_tag_name': release_tag,
        }

    def revert(self):
        # Fetch release
        # NOTE(review): the lookup uses release_version, while the release was
        # created under the rendered tag name (release_tag). This only matches when
        # the github_release_tag template is exactly `refs/tags/{VERSION}` - verify.
        try:
            release = self.github_helper.repository.release_from_tag(self.release_version)
        except NotFoundError:
            release = None
        if release:
            logger.info(f'Deleting {self.release_version=}')
            if not release.delete():
                raise RuntimeError("Release could not be deleted")


class UploadComponentDescriptorStep(TransactionalStep):
    '''Publishes component descriptor(s) to the OCI registry and attaches them as release assets.

    Exactly one of component-descriptor-file or CTF-archive must exist.
    '''
    def __init__(
        self,
        github_helper: GitHubRepositoryHelper,
        component_descriptor_v2_path: str,
        ctf_path:str,
    ):
        self.github_helper = not_none(github_helper)
        self.component_descriptor_v2_path = component_descriptor_v2_path
        self.ctf_path = ctf_path

    def name(self):
        return "Upload Component Descriptor"

    def validate(self):
        # either cds _XOR_ ctf must exist
        have_ctf = os.path.exists(self.ctf_path)
        have_cd = os.path.exists(self.component_descriptor_v2_path)
        if not have_ctf ^ have_cd:
            ci.util.fail('exactly one of component-descriptor, or ctf-archive must exist')
        elif have_cd:
            self.components = [cm.ComponentDescriptor.from_dict(
                component_descriptor_dict=ci.util.parse_yaml_file(self.component_descriptor_v2_path),
                validation_mode=cm.ValidationMode.FAIL,
            )]
        elif have_ctf:
            self.components = tuple(cnudie.util.component_descriptors_from_ctf_archive(
                self.ctf_path,
            ))
            if not self.components:
                ci.util.fail(f'No component descriptor found in CTF archive at {self.ctf_path=}')

        # eagerly resolve transitive component references to surface broken refs early;
        # deque(maxlen=0) just exhausts the generator without keeping results
        for component_descriptor_v2 in self.components:
            try:
                collections.deque(
                    cnudie.retrieve.components(component=component_descriptor_v2),
                    maxlen=0,
                )
            except oci.model.OciImageNotFoundException as e:
                logger.warning(
                    'Error when retrieving the Component Descriptor of a component referenced in '
                    f"this component's Component Descriptor: {e}"
                )

    def apply(
        self,
    ):
        create_release_step_output = self.context().step_output('Create Release')
        release_tag_name = create_release_step_output['release_tag_name']

        if os.path.exists(self.component_descriptor_v2_path):
            component_descriptor_v2 = cm.ComponentDescriptor.from_dict(
                component_descriptor_dict=ci.util.parse_yaml_file(
                    self.component_descriptor_v2_path
                ),
            )
            component = component_descriptor_v2.component
            tgt_ref = product.v2._target_oci_ref(component=component)

            logger.info(f'publishing CNUDIE-Component-Descriptor to {tgt_ref=}')
            product.v2.upload_component_descriptor_v2_to_oci_registry(
                component_descriptor_v2=component_descriptor_v2,
            )
        elif os.path.exists(self.ctf_path):
            logger.info('processing CTF-archive')
            subprocess.run(
                [
                    'component-cli',
                    'ctf',
                    'push',
                    self.ctf_path,
                ],
                check=True,
            )

        # best-effort: attach descriptors as release assets; a failed upload is only logged
        try:
            release = self.github_helper.repository.release_from_tag(release_tag_name)
            for component_descriptor_v2 in self.components:
                descriptor_str = yaml.dump(
                    data=dataclasses.asdict(component_descriptor_v2),
                    Dumper=cm.EnumValueYamlDumper,
                )
                normalized_component_name = component_descriptor_v2.component.name.replace('/', '_')
                asset_name = f'{normalized_component_name}.component_descriptor.cnudie.yaml'
                release.upload_asset(
                    content_type='application/x-yaml',
                    name=asset_name,
                    asset=descriptor_str.encode('utf-8'),
                    label=asset_name,
                )
        except ConnectionError:
            logger.warning('Unable to attach component-descriptors to release as release-asset.')

    def revert(self):
        # uploaded OCI artefacts / release assets are intentionally not removed
        pass


class PublishReleaseNotesStep(TransactionalStep):
    '''Generates release notes from commit history and writes them to the GitHub release body.'''
    def name(self):
        return "Publish Release Notes"

    def __init__(
        self,
        githubrepobranch: GitHubRepoBranch,
        github_helper: GitHubRepositoryHelper,
        repository_hostname: str,
        repository_path: str,
        component_descriptor_v2_path: str,
        release_version: str,
        ctf_path: str,
        repo_dir: str,
    ):
        self.repository_hostname = repository_hostname
        self.repository_path = repository_path
        self.githubrepobranch = not_none(githubrepobranch)
        self.github_helper = not_none(github_helper)
        self.release_version = not_empty(release_version)
        self.repo_dir = os.path.abspath(not_empty(repo_dir))
        self.component_descriptor_v2_path = component_descriptor_v2_path
        self.ctf_path = ctf_path

    def validate(self):
        version.parse_to_semver(self.release_version)
        existing_dir(self.repo_dir)

        # also determines the main component used by apply()
        try:
            self.component_descriptor_v2 = cnudie.util.determine_main_component(
                repository_hostname=self.repository_hostname,
                repository_path=self.repository_path,
                component_descriptor_v2_path=self.component_descriptor_v2_path,
                ctf_path=self.ctf_path,
            )
        except ValueError as err:
            ci.util.fail(str(err))

    def apply(self):
        create_release_step_output = self.context().step_output('Create Release')
        release_tag = create_release_step_output['release_tag_name']

        release_notes = fetch_release_notes(
            self.component_descriptor_v2.component,
            repo_dir=self.repo_dir,
            repository_branch=self.githubrepobranch.branch(),
        )

        release_notes_md = release_notes.to_markdown(force_link_generation=True)
        self.github_helper.update_release_notes(
            tag_name=release_tag,
            body=release_notes_md,
            component_name=self.component_descriptor_v2.component.name,
        )

        return {
            'release notes': release_notes,
            'release notes markdown': release_notes_md,
        }

    def revert(self):
        if not self.context().has_output(self.name()):
            # Updating release notes was unsuccessful, nothing to do
            return
        # purge release notes
        self.github_helper.update_release_notes(
            tag_name=self.release_version,
            body='',
        )


class TryCleanupDraftReleasesStep(TransactionalStep):
    '''Best-effort removal of outdated draft releases; failures are only logged.'''
    def name(self):
        return "Try to Cleanup Draft Releases"

    def __init__(
        self,
        github_helper: GitHubRepositoryHelper,
    ):
        self.github_helper = not_none(github_helper)

    def validate(self):
        # nothing to validate
        pass

    def apply(self):
        for release, deletion_successful in self.github_helper.delete_outdated_draft_releases():
            if deletion_successful:
                logger.info(f'Deleted {release.name=}')
            else:
                logger.warning(f'Could not delete {release.name=}')
        return

    def revert(self):
        # nothing to revert
        pass


class PostSlackReleaseStep(TransactionalStep):
    '''Posts the generated release notes to a single Slack channel.'''
    def name(self):
        return f"Post Slack Release ({self.slack_channel})"

    def __init__(
        self,
        slack_cfg_name: str,
        slack_channel: str,
        release_version: str,
        release_notes: ReleaseNotes,
        githubrepobranch: GitHubRepoBranch,
    ):
        self.slack_cfg_name = not_empty(slack_cfg_name)
        self.slack_channel = not_empty(slack_channel)
        self.release_version = not_empty(release_version)
        self.githubrepobranch = not_none(githubrepobranch)
        self.release_notes = not_none(release_notes)

    def validate(self):
        version.parse_to_semver(self.release_version)

    def apply(self):
        responses = post_to_slack(
            release_notes=self.release_notes,
            github_repository_name=self.githubrepobranch.github_repo_path(),
            slack_cfg_name=self.slack_cfg_name,
            slack_channel=self.slack_channel,
            release_version=self.release_version,
        )
        for response in responses:
            if response and response.get('file', None):
                uploaded_file_id = response.get('file').get('id')
                logger.info(f'uploaded {uploaded_file_id=} to slack')
            else:
                raise RuntimeError('Unable to get file id from Slack response')
        logger.info('successfully posted contents to slack')

    def revert(self):
        if not self.context().has_output(self.name()):
            # Posting the release notes was unsuccessful, nothing to revert
            return
        # NOTE(review): apply() returns None implicitly, so this lookup of
        # 'uploaded file id' on the stored output will fail (AttributeError on
        # None.get) - confirm intended step-output contract.
        uploaded_file_id = self.context().step_output(self.name()).get('uploaded file id')
        delete_file_from_slack(
            slack_cfg_name=self.slack_cfg_name,
            file_id=uploaded_file_id,
        )


def _invoke_callback(
    callback_script_path: str,
    repo_dir: str,
    effective_version: str,
    callback_image_reference: str=None,
):
    '''Runs a release/dev-cycle callback script, either on the host or inside a container.

    The callback receives EFFECTIVE_VERSION and REPO_DIR via its environment. When
    callback_image_reference is given, the repository is bind-mounted into the
    container and the script is executed there.
    '''
    callback_env = os.environ.copy()
    callback_env['EFFECTIVE_VERSION'] = effective_version

    if callback_image_reference:
        repo_dir_in_container = '/mnt/main_repo'
        callback_env['REPO_DIR'] = repo_dir_in_container
    else:
        callback_env['REPO_DIR'] = repo_dir

    if not callback_image_reference:
        callback_script_path = os.path.join(
            repo_dir,
            callback_script_path,
        )
        subprocess.run(
            [callback_script_path],
            check=True,
            env=callback_env,
        )
    else:
        script_path_in_container = os.path.join(
            repo_dir_in_container,
            callback_script_path,
        )
        oci_registry_cfg = cr.find_config(image_reference=callback_image_reference)
        if oci_registry_cfg:
            docker_cfg_dir = tempfile.TemporaryDirectory()
            dockerutil.mk_docker_cfg_dir(
                cfg={'auths': oci_registry_cfg.as_docker_auths()},
                cfg_dir=docker_cfg_dir.name,
                exist_ok=True,
            )
        else:
            docker_cfg_dir = None

        # NOTE(review): when no registry-config is found, docker_cfg_dir is None and
        # `docker_cfg_dir.name` below raises AttributeError - confirm whether
        # anonymous pulls are expected to work here.
        docker_argv = dockerutil.docker_run_argv(
            image_reference=callback_image_reference,
            argv=(script_path_in_container,),
            env=callback_env,
            mounts={
                repo_dir: repo_dir_in_container,
            },
            cfg_dir=docker_cfg_dir.name,
        )

        dockerutil.launch_dockerd_if_not_running()

        logger.info(f'will run callback using {docker_argv=}')

        try:
            subprocess.run(
                docker_argv,
                check=True,
            )
        finally:
            if docker_cfg_dir:
                docker_cfg_dir.cleanup()


def _add_all_and_create_commit(git_helper: GitHelper, message: str):
    # stage everything and create a commit, then make it the current HEAD
    commit = git_helper.index_to_commit(
        message=message,
    )
    git_helper.repo.head.reset(
        commit=commit,
        working_tree=True,
    )
    return commit


def _calculate_next_cycle_dev_version(
    release_version: str,
    version_operation: str,
    prerelease_suffix: str,
):
    # calculate the next version and append the prerelease suffix
    return version.process_version(
        version_str=version.process_version(
            version_str=release_version,
            operation=version_operation,
        ),
        operation='set_prerelease',
        prerelease=prerelease_suffix,
    )


def release_and_prepare_next_dev_cycle(
    component_name: str,
    githubrepobranch: GitHubRepoBranch,
    release_commit_publishing_policy: str,
    release_notes_policy: str,
    release_version: str,
    repo_hostname: str,
    repo_path: str,
    repo_dir: str,
    repository_version_file_path: str,
    git_tags: list,
    github_release_tag: dict,
    release_commit_callback_image_reference: str,
    author_email: str="gardener.ci.user@gmail.com",
    author_name: str="gardener-ci",
    component_descriptor_v2_path: str=None,
    ctf_path: str=None,
    next_cycle_commit_message_prefix: str=None,
    next_version_callback: str=None,
    prerelease_suffix: str="dev",
    rebase_before_release: bool=False,
    release_commit_callback: str=None,
    release_commit_message_prefix: str=None,
    slack_channel_configs: list=[],
    version_operation: str="bump_minor",
):
    '''Orchestrates the full release flow as a sequence of transactions.

    1) release transaction: (optional rebase), release commit, tags, next-dev-cycle
       commit, GitHub release, component-descriptor upload - reverted on failure
    2) best-effort cleanup of draft releases
    3) release-notes transaction (unless disabled by policy)
    4) one Slack-post transaction per configured channel

    Raises RuntimeError if any non-best-effort transaction fails.
    '''
    transaction_ctx = TransactionContext() # shared between all steps/trxs

    release_notes_policy = ReleaseNotesPolicy(release_notes_policy)
    release_commit_publishing_policy = ReleaseCommitPublishingPolicy(
        release_commit_publishing_policy
    )
    github_helper = GitHubRepositoryHelper.from_githubrepobranch(githubrepobranch)
    git_helper = GitHelper.from_githubrepobranch(
        githubrepobranch=githubrepobranch,
        repo_path=repo_dir,
    )

    step_list = []

    if rebase_before_release:
        rebase_step = RebaseStep(
            git_helper=git_helper,
            repository_branch=githubrepobranch.branch(),
        )
        step_list.append(rebase_step)

    release_commit_step = ReleaseCommitStep(
        git_helper=git_helper,
        repo_dir=repo_dir,
        release_version=release_version,
        repository_version_file_path=repository_version_file_path,
        repository_branch=githubrepobranch.branch(),
        release_commit_message_prefix=release_commit_message_prefix,
        release_commit_callback=release_commit_callback,
        release_commit_callback_image_reference=release_commit_callback_image_reference,
        publishing_policy=release_commit_publishing_policy,
    )
    step_list.append(release_commit_step)

    create_tag_step = CreateTagsStep(
        author_email=author_email,
        author_name=author_name,
        git_tags=git_tags,
        github_release_tag=github_release_tag,
        git_helper=git_helper,
        github_helper=github_helper,
        release_version=release_version,
        publishing_policy=release_commit_publishing_policy,
    )
    step_list.append(create_tag_step)

    # version_operation == NOOP means: no next dev-cycle commit is wanted
    if version_operation != version.NOOP:
        next_cycle_commit_step = NextDevCycleCommitStep(
            git_helper=git_helper,
            repo_dir=repo_dir,
            release_version=release_version,
            repository_version_file_path=repository_version_file_path,
            repository_branch=githubrepobranch.branch(),
            version_operation=version_operation,
            prerelease_suffix=prerelease_suffix,
            next_version_callback=next_version_callback,
            publishing_policy=release_commit_publishing_policy,
            next_cycle_commit_message_prefix=next_cycle_commit_message_prefix,
        )
        step_list.append(next_cycle_commit_step)

    github_release_step = GitHubReleaseStep(
        github_helper=github_helper,
        githubrepobranch=githubrepobranch,
        repo_dir=repo_dir,
        component_name=component_name,
        release_version=release_version,
    )
    step_list.append(github_release_step)

    upload_component_descriptor_step = UploadComponentDescriptorStep(
        github_helper=github_helper,
        component_descriptor_v2_path=component_descriptor_v2_path,
        ctf_path=ctf_path,
    )

    step_list.append(upload_component_descriptor_step)

    release_transaction = Transaction(
        ctx=transaction_ctx,
        steps=step_list,
    )

    release_transaction.validate()
    if not release_transaction.execute():
        raise RuntimeError('An error occurred while creating the Release.')

    publish_release_notes_step = PublishReleaseNotesStep(
        githubrepobranch=githubrepobranch,
        github_helper=github_helper,
        repository_hostname=repo_hostname,
        repository_path=repo_path,
        release_version=release_version,
        component_descriptor_v2_path=component_descriptor_v2_path,
        ctf_path=ctf_path,
        repo_dir=repo_dir,
    )

    cleanup_draft_releases_step = TryCleanupDraftReleasesStep(
        github_helper=github_helper,
    )

    cleanup_draft_releases_transaction = Transaction(
        ctx=transaction_ctx,
        steps=(cleanup_draft_releases_step,),
    )

    # cleanup is best-effort: failure only warns, it does not abort the release
    if not cleanup_draft_releases_transaction.execute():
        logger.warning('An error occured while cleaning up draft releases')

    if release_notes_policy == ReleaseNotesPolicy.DISABLED:
        # logger.info returns None, so this both logs and returns early
        return logger.info('release notes were disabled - skipping')
    elif release_notes_policy == ReleaseNotesPolicy.DEFAULT:
        pass
    else:
        raise NotImplementedError(release_notes_policy)

    release_notes_transaction = Transaction(
        ctx=transaction_ctx,
        steps=(publish_release_notes_step,),
    )
    release_notes_transaction.validate()
    if not release_notes_transaction.execute():
        raise RuntimeError('An error occurred while publishing the release notes.')

    if slack_channel_configs:
        release_notes = transaction_ctx.step_output(
            publish_release_notes_step.name()
        ).get('release notes')
        all_slack_releases_successful = True

        # post to every configured channel; aggregate success across channels
        for slack_cfg in slack_channel_configs:
            slack_cfg_name = slack_cfg['slack_cfg_name']
            slack_channel = slack_cfg['channel_name']
            post_to_slack_step = PostSlackReleaseStep(
                slack_cfg_name=slack_cfg_name,
                slack_channel=slack_channel,
                release_version=release_version,
                release_notes=release_notes,
                githubrepobranch=githubrepobranch,
            )
            slack_transaction = Transaction(
                ctx=transaction_ctx,
                steps=(post_to_slack_step,),
            )
            slack_transaction.validate()
            all_slack_releases_successful = (
                all_slack_releases_successful and slack_transaction.execute()
            )
        if not all_slack_releases_successful:
            raise RuntimeError('An error occurred while posting the release notes to Slack.')
import math import os import sys import tempfile from datetime import date, datetime, time, timedelta from decimal import Decimal from enum import Enum, IntEnum from ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network from pathlib import Path from typing import ( Any, Callable, Dict, FrozenSet, Generic, Iterable, List, NewType, Optional, Set, Tuple, TypeVar, Union, ) from uuid import UUID import pytest from pydantic import BaseModel, Extra, Field, ValidationError, conlist, conset, validator from pydantic.color import Color from pydantic.dataclasses import dataclass from pydantic.networks import AnyUrl, EmailStr, IPvAnyAddress, IPvAnyInterface, IPvAnyNetwork, NameEmail, stricturl from pydantic.schema import ( get_flat_models_from_model, get_flat_models_from_models, get_model_name_map, model_process_schema, model_schema, schema, ) from pydantic.types import ( UUID1, UUID3, UUID4, UUID5, ConstrainedBytes, ConstrainedDecimal, ConstrainedFloat, ConstrainedInt, ConstrainedStr, DirectoryPath, FilePath, Json, NegativeFloat, NegativeInt, NoneBytes, NoneStr, NoneStrBytes, NonNegativeFloat, NonNegativeInt, NonPositiveFloat, NonPositiveInt, PositiveFloat, PositiveInt, PyObject, SecretBytes, SecretStr, StrBytes, StrictBool, StrictStr, conbytes, condecimal, confloat, conint, constr, ) from pydantic.typing import Literal try: import email_validator except ImportError: email_validator = None def test_key(): class ApplePie(BaseModel): """ This is a test. 
""" a: float b: int = 10 s = { 'type': 'object', 'properties': {'a': {'type': 'number', 'title': 'A'}, 'b': {'type': 'integer', 'title': 'B', 'default': 10}}, 'required': ['a'], 'title': 'ApplePie', 'description': 'This is a test.', } assert ApplePie.__schema_cache__.keys() == set() assert ApplePie.schema() == s assert ApplePie.__schema_cache__.keys() == {(True, '#/definitions/{model}')} assert ApplePie.schema() == s def test_by_alias(): class ApplePie(BaseModel): a: float b: int = 10 class Config: title = 'Apple Pie' fields = {'a': 'Snap', 'b': 'Crackle'} assert ApplePie.schema() == { 'type': 'object', 'title': 'Apple Pie', 'properties': { 'Snap': {'type': 'number', 'title': 'Snap'}, 'Crackle': {'type': 'integer', 'title': 'Crackle', 'default': 10}, }, 'required': ['Snap'], } assert list(ApplePie.schema(by_alias=True)['properties'].keys()) == ['Snap', 'Crackle'] assert list(ApplePie.schema(by_alias=False)['properties'].keys()) == ['a', 'b'] def test_ref_template(): class KeyLimePie(BaseModel): x: str = None class ApplePie(BaseModel): a: float = None key_lime: KeyLimePie = None class Config: title = 'Apple Pie' assert ApplePie.schema(ref_template='foobar/{model}.json') == { 'title': 'Apple Pie', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'number'}, 'key_lime': {'$ref': 'foobar/KeyLimePie.json'}}, 'definitions': { 'KeyLimePie': { 'title': 'KeyLimePie', 'type': 'object', 'properties': {'x': {'title': 'X', 'type': 'string'}}, }, }, } assert ApplePie.schema()['properties']['key_lime'] == {'$ref': '#/definitions/KeyLimePie'} json_schema = ApplePie.schema_json(ref_template='foobar/{model}.json') assert 'foobar/KeyLimePie.json' in json_schema assert '#/definitions/KeyLimePie' not in json_schema def test_by_alias_generator(): class ApplePie(BaseModel): a: float b: int = 10 class Config: @staticmethod def alias_generator(x): return x.upper() assert ApplePie.schema() == { 'title': 'ApplePie', 'type': 'object', 'properties': {'A': {'title': 'A', 'type': 
'number'}, 'B': {'title': 'B', 'default': 10, 'type': 'integer'}}, 'required': ['A'], } assert ApplePie.schema(by_alias=False)['properties'].keys() == {'a', 'b'} def test_sub_model(): class Foo(BaseModel): """hello""" b: float class Bar(BaseModel): a: int b: Foo = None assert Bar.schema() == { 'type': 'object', 'title': 'Bar', 'definitions': { 'Foo': { 'type': 'object', 'title': 'Foo', 'description': 'hello', 'properties': {'b': {'type': 'number', 'title': 'B'}}, 'required': ['b'], } }, 'properties': {'a': {'type': 'integer', 'title': 'A'}, 'b': {'$ref': '#/definitions/Foo'}}, 'required': ['a'], } def test_schema_class(): class Model(BaseModel): foo: int = Field(4, title='Foo is Great') bar: str = Field(..., description='this description of bar') with pytest.raises(ValidationError): Model() m = Model(bar=123) assert m.dict() == {'foo': 4, 'bar': '123'} assert Model.schema() == { 'type': 'object', 'title': 'Model', 'properties': { 'foo': {'type': 'integer', 'title': 'Foo is Great', 'default': 4}, 'bar': {'type': 'string', 'title': 'Bar', 'description': 'this description of bar'}, }, 'required': ['bar'], } def test_schema_repr(): s = Field(4, title='Foo is Great') assert str(s) == "default=4 title='Foo is Great' extra={}" assert repr(s) == "FieldInfo(default=4, title='Foo is Great', extra={})" def test_schema_class_by_alias(): class Model(BaseModel): foo: int = Field(4, alias='foofoo') assert list(Model.schema()['properties'].keys()) == ['foofoo'] assert list(Model.schema(by_alias=False)['properties'].keys()) == ['foo'] def test_choices(): FooEnum = Enum('FooEnum', {'foo': 'f', 'bar': 'b'}) BarEnum = IntEnum('BarEnum', {'foo': 1, 'bar': 2}) class SpamEnum(str, Enum): foo = 'f' bar = 'b' class Model(BaseModel): foo: FooEnum bar: BarEnum spam: SpamEnum = Field(None) assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': { 'foo': {'$ref': '#/definitions/FooEnum'}, 'bar': {'$ref': '#/definitions/BarEnum'}, 'spam': {'$ref': '#/definitions/SpamEnum'}, 
}, 'required': ['foo', 'bar'], 'definitions': { 'FooEnum': {'title': 'FooEnum', 'description': 'An enumeration.', 'enum': ['f', 'b']}, 'BarEnum': {'title': 'BarEnum', 'description': 'An enumeration.', 'type': 'integer', 'enum': [1, 2]}, 'SpamEnum': {'title': 'SpamEnum', 'description': 'An enumeration.', 'type': 'string', 'enum': ['f', 'b']}, }, } def test_enum_modify_schema(): class SpamEnum(str, Enum): foo = 'f' bar = 'b' @classmethod def __modify_schema__(cls, field_schema): field_schema['tsEnumNames'] = [e.name for e in cls] class Model(BaseModel): spam: SpamEnum = Field(None) assert Model.schema() == { 'definitions': { 'SpamEnum': { 'description': 'An enumeration.', 'enum': ['f', 'b'], 'title': 'SpamEnum', 'tsEnumNames': ['foo', 'bar'], 'type': 'string', } }, 'properties': {'spam': {'$ref': '#/definitions/SpamEnum'}}, 'title': 'Model', 'type': 'object', } def test_enum_schema_custom_field(): class FooBarEnum(str, Enum): foo = 'foo' bar = 'bar' class Model(BaseModel): pika: FooBarEnum = Field(alias='pikalias', title='Pikapika!', description='Pika is definitely the best!') bulbi: FooBarEnum = Field('foo', alias='bulbialias', title='Bulbibulbi!', description='Bulbi is not...') cara: FooBarEnum assert Model.schema() == { 'definitions': { 'FooBarEnum': { 'description': 'An enumeration.', 'enum': ['foo', 'bar'], 'title': 'FooBarEnum', 'type': 'string', } }, 'properties': { 'pikalias': { 'allOf': [{'$ref': '#/definitions/FooBarEnum'}], 'description': 'Pika is definitely the best!', 'title': 'Pikapika!', }, 'bulbialias': { 'allOf': [{'$ref': '#/definitions/FooBarEnum'}], 'description': 'Bulbi is not...', 'title': 'Bulbibulbi!', 'default': 'foo', }, 'cara': {'$ref': '#/definitions/FooBarEnum'}, }, 'required': ['pikalias', 'cara'], 'title': 'Model', 'type': 'object', } def test_enum_and_model_have_same_behaviour(): class Names(str, Enum): rick = 'Rick' morty = 'Morty' summer = 'Summer' class Pika(BaseModel): a: str class Foo(BaseModel): enum: Names titled_enum: Names = 
Field( ..., title='Title of enum', description='Description of enum', ) model: Pika titled_model: Pika = Field( ..., title='Title of model', description='Description of model', ) assert Foo.schema() == { 'definitions': { 'Pika': { 'properties': {'a': {'title': 'A', 'type': 'string'}}, 'required': ['a'], 'title': 'Pika', 'type': 'object', }, 'Names': { 'description': 'An enumeration.', 'enum': ['Rick', 'Morty', 'Summer'], 'title': 'Names', 'type': 'string', }, }, 'properties': { 'enum': {'$ref': '#/definitions/Names'}, 'model': {'$ref': '#/definitions/Pika'}, 'titled_enum': { 'allOf': [{'$ref': '#/definitions/Names'}], 'description': 'Description of enum', 'title': 'Title of enum', }, 'titled_model': { 'allOf': [{'$ref': '#/definitions/Pika'}], 'description': 'Description of model', 'title': 'Title of model', }, }, 'required': ['enum', 'titled_enum', 'model', 'titled_model'], 'title': 'Foo', 'type': 'object', } def test_list_enum_schema_extras(): class FoodChoice(str, Enum): spam = 'spam' egg = 'egg' chips = 'chips' class Model(BaseModel): foods: List[FoodChoice] = Field(examples=[['spam', 'egg']]) assert Model.schema() == { 'definitions': { 'FoodChoice': { 'description': 'An enumeration.', 'enum': ['spam', 'egg', 'chips'], 'title': 'FoodChoice', 'type': 'string', } }, 'properties': { 'foods': {'type': 'array', 'items': {'$ref': '#/definitions/FoodChoice'}, 'examples': [['spam', 'egg']]}, }, 'required': ['foods'], 'title': 'Model', 'type': 'object', } def test_json_schema(): class Model(BaseModel): a = b'foobar' b = Decimal('12.34') assert Model.schema_json(indent=2) == ( '{\n' ' "title": "Model",\n' ' "type": "object",\n' ' "properties": {\n' ' "a": {\n' ' "title": "A",\n' ' "default": "foobar",\n' ' "type": "string",\n' ' "format": "binary"\n' ' },\n' ' "b": {\n' ' "title": "B",\n' ' "default": 12.34,\n' ' "type": "number"\n' ' }\n' ' }\n' '}' ) def test_list_sub_model(): class Foo(BaseModel): a: float class Bar(BaseModel): b: List[Foo] assert Bar.schema() == { 
'title': 'Bar', 'type': 'object', 'definitions': { 'Foo': { 'title': 'Foo', 'type': 'object', 'properties': {'a': {'type': 'number', 'title': 'A'}}, 'required': ['a'], } }, 'properties': {'b': {'type': 'array', 'items': {'$ref': '#/definitions/Foo'}, 'title': 'B'}}, 'required': ['b'], } def test_optional(): class Model(BaseModel): a: Optional[str] assert Model.schema() == {'title': 'Model', 'type': 'object', 'properties': {'a': {'type': 'string', 'title': 'A'}}} def test_any(): class Model(BaseModel): a: Any assert Model.schema() == {'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A'}}} def test_set(): class Model(BaseModel): a: Set[int] b: set assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': { 'a': {'title': 'A', 'type': 'array', 'uniqueItems': True, 'items': {'type': 'integer'}}, 'b': {'title': 'B', 'type': 'array', 'items': {}, 'uniqueItems': True}, }, 'required': ['a', 'b'], } def test_const_str(): class Model(BaseModel): a: str = Field('some string', const=True) assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'string', 'const': 'some string'}}, } def test_const_false(): class Model(BaseModel): a: str = Field('some string', const=False) assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'string', 'default': 'some string'}}, } @pytest.mark.parametrize( 'field_type,expected_schema', [ (tuple, {}), ( Tuple[str, int, Union[str, int, float], float], [ {'type': 'string'}, {'type': 'integer'}, {'anyOf': [{'type': 'string'}, {'type': 'integer'}, {'type': 'number'}]}, {'type': 'number'}, ], ), (Tuple[str], {'type': 'string'}), ], ) def test_tuple(field_type, expected_schema): class Model(BaseModel): a: field_type base_schema = { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'array'}}, 'required': ['a'], } base_schema['properties']['a']['items'] = expected_schema assert 
Model.schema() == base_schema def test_bool(): class Model(BaseModel): a: bool assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'boolean'}}, 'required': ['a'], } def test_strict_bool(): class Model(BaseModel): a: StrictBool assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'boolean'}}, 'required': ['a'], } def test_dict(): class Model(BaseModel): a: dict assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'object'}}, 'required': ['a'], } def test_list(): class Model(BaseModel): a: list assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'array', 'items': {}}}, 'required': ['a'], } class Foo(BaseModel): a: float @pytest.mark.parametrize( 'field_type,expected_schema', [ ( Union[int, str], { 'properties': {'a': {'title': 'A', 'anyOf': [{'type': 'integer'}, {'type': 'string'}]}}, 'required': ['a'], }, ), ( List[int], {'properties': {'a': {'title': 'A', 'type': 'array', 'items': {'type': 'integer'}}}, 'required': ['a']}, ), ( Dict[str, Foo], { 'definitions': { 'Foo': { 'title': 'Foo', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'number'}}, 'required': ['a'], } }, 'properties': { 'a': {'title': 'A', 'type': 'object', 'additionalProperties': {'$ref': '#/definitions/Foo'}} }, 'required': ['a'], }, ), ( Union[None, Foo], { 'definitions': { 'Foo': { 'title': 'Foo', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'number'}}, 'required': ['a'], } }, 'properties': {'a': {'$ref': '#/definitions/Foo'}}, }, ), (Dict[str, Any], {'properties': {'a': {'title': 'A', 'type': 'object'}}, 'required': ['a']}), ], ) def test_list_union_dict(field_type, expected_schema): class Model(BaseModel): a: field_type base_schema = {'title': 'Model', 'type': 'object'} base_schema.update(expected_schema) assert Model.schema() == base_schema 
@pytest.mark.parametrize( 'field_type,expected_schema', [ (datetime, {'type': 'string', 'format': 'date-time'}), (date, {'type': 'string', 'format': 'date'}), (time, {'type': 'string', 'format': 'time'}), (timedelta, {'type': 'number', 'format': 'time-delta'}), ], ) def test_date_types(field_type, expected_schema): class Model(BaseModel): a: field_type attribute_schema = {'title': 'A'} attribute_schema.update(expected_schema) base_schema = {'title': 'Model', 'type': 'object', 'properties': {'a': attribute_schema}, 'required': ['a']} assert Model.schema() == base_schema @pytest.mark.parametrize( 'field_type,expected_schema', [ (NoneStr, {'properties': {'a': {'title': 'A', 'type': 'string'}}}), (NoneBytes, {'properties': {'a': {'title': 'A', 'type': 'string', 'format': 'binary'}}}), ( StrBytes, { 'properties': { 'a': {'title': 'A', 'anyOf': [{'type': 'string'}, {'type': 'string', 'format': 'binary'}]} }, 'required': ['a'], }, ), ( NoneStrBytes, { 'properties': { 'a': {'title': 'A', 'anyOf': [{'type': 'string'}, {'type': 'string', 'format': 'binary'}]} } }, ), ], ) def test_str_basic_types(field_type, expected_schema): class Model(BaseModel): a: field_type base_schema = {'title': 'Model', 'type': 'object'} base_schema.update(expected_schema) assert Model.schema() == base_schema @pytest.mark.parametrize( 'field_type,expected_schema', [ (StrictStr, {'title': 'A', 'type': 'string'}), (ConstrainedStr, {'title': 'A', 'type': 'string'}), ( constr(min_length=3, max_length=5, regex='^text$'), {'title': 'A', 'type': 'string', 'minLength': 3, 'maxLength': 5, 'pattern': '^text$'}, ), ], ) def test_str_constrained_types(field_type, expected_schema): class Model(BaseModel): a: field_type model_schema = Model.schema() assert model_schema['properties']['a'] == expected_schema base_schema = {'title': 'Model', 'type': 'object', 'properties': {'a': expected_schema}, 'required': ['a']} assert model_schema == base_schema @pytest.mark.parametrize( 'field_type,expected_schema', [ (AnyUrl, 
{'title': 'A', 'type': 'string', 'format': 'uri', 'minLength': 1, 'maxLength': 2 ** 16}), ( stricturl(min_length=5, max_length=10), {'title': 'A', 'type': 'string', 'format': 'uri', 'minLength': 5, 'maxLength': 10}, ), ], ) def test_special_str_types(field_type, expected_schema): class Model(BaseModel): a: field_type base_schema = {'title': 'Model', 'type': 'object', 'properties': {'a': {}}, 'required': ['a']} base_schema['properties']['a'] = expected_schema assert Model.schema() == base_schema @pytest.mark.skipif(not email_validator, reason='email_validator not installed') @pytest.mark.parametrize('field_type,expected_schema', [(EmailStr, 'email'), (NameEmail, 'name-email')]) def test_email_str_types(field_type, expected_schema): class Model(BaseModel): a: field_type base_schema = { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'string'}}, 'required': ['a'], } base_schema['properties']['a']['format'] = expected_schema assert Model.schema() == base_schema @pytest.mark.parametrize('field_type,inner_type', [(SecretBytes, 'string'), (SecretStr, 'string')]) def test_secret_types(field_type, inner_type): class Model(BaseModel): a: field_type base_schema = { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': inner_type, 'writeOnly': True, 'format': 'password'}}, 'required': ['a'], } assert Model.schema() == base_schema @pytest.mark.parametrize( 'field_type,expected_schema', [ (ConstrainedInt, {}), (conint(gt=5, lt=10), {'exclusiveMinimum': 5, 'exclusiveMaximum': 10}), (conint(ge=5, le=10), {'minimum': 5, 'maximum': 10}), (conint(multiple_of=5), {'multipleOf': 5}), (PositiveInt, {'exclusiveMinimum': 0}), (NegativeInt, {'exclusiveMaximum': 0}), (NonNegativeInt, {'minimum': 0}), (NonPositiveInt, {'maximum': 0}), ], ) def test_special_int_types(field_type, expected_schema): class Model(BaseModel): a: field_type base_schema = { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 
'integer'}}, 'required': ['a'], } base_schema['properties']['a'].update(expected_schema) assert Model.schema() == base_schema @pytest.mark.parametrize( 'field_type,expected_schema', [ (ConstrainedFloat, {}), (confloat(gt=5, lt=10), {'exclusiveMinimum': 5, 'exclusiveMaximum': 10}), (confloat(ge=5, le=10), {'minimum': 5, 'maximum': 10}), (confloat(multiple_of=5), {'multipleOf': 5}), (PositiveFloat, {'exclusiveMinimum': 0}), (NegativeFloat, {'exclusiveMaximum': 0}), (NonNegativeFloat, {'minimum': 0}), (NonPositiveFloat, {'maximum': 0}), (ConstrainedDecimal, {}), (condecimal(gt=5, lt=10), {'exclusiveMinimum': 5, 'exclusiveMaximum': 10}), (condecimal(ge=5, le=10), {'minimum': 5, 'maximum': 10}), (condecimal(multiple_of=5), {'multipleOf': 5}), ], ) def test_special_float_types(field_type, expected_schema): class Model(BaseModel): a: field_type base_schema = { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'number'}}, 'required': ['a'], } base_schema['properties']['a'].update(expected_schema) assert Model.schema() == base_schema @pytest.mark.parametrize( 'field_type,expected_schema', [(UUID, 'uuid'), (UUID1, 'uuid1'), (UUID3, 'uuid3'), (UUID4, 'uuid4'), (UUID5, 'uuid5')], ) def test_uuid_types(field_type, expected_schema): class Model(BaseModel): a: field_type base_schema = { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'string', 'format': ''}}, 'required': ['a'], } base_schema['properties']['a']['format'] = expected_schema assert Model.schema() == base_schema @pytest.mark.parametrize( 'field_type,expected_schema', [(FilePath, 'file-path'), (DirectoryPath, 'directory-path'), (Path, 'path')] ) def test_path_types(field_type, expected_schema): class Model(BaseModel): a: field_type base_schema = { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'string', 'format': ''}}, 'required': ['a'], } base_schema['properties']['a']['format'] = expected_schema assert Model.schema() == 
base_schema def test_json_type(): class Model(BaseModel): a: Json b: Json[int] assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': { 'a': {'title': 'A', 'type': 'string', 'format': 'json-string'}, 'b': {'title': 'B', 'type': 'integer'}, }, 'required': ['b'], } def test_ipv4address_type(): class Model(BaseModel): ip_address: IPv4Address model_schema = Model.schema() assert model_schema == { 'title': 'Model', 'type': 'object', 'properties': {'ip_address': {'title': 'Ip Address', 'type': 'string', 'format': 'ipv4'}}, 'required': ['ip_address'], } def test_ipv6address_type(): class Model(BaseModel): ip_address: IPv6Address model_schema = Model.schema() assert model_schema == { 'title': 'Model', 'type': 'object', 'properties': {'ip_address': {'title': 'Ip Address', 'type': 'string', 'format': 'ipv6'}}, 'required': ['ip_address'], } def test_ipvanyaddress_type(): class Model(BaseModel): ip_address: IPvAnyAddress model_schema = Model.schema() assert model_schema == { 'title': 'Model', 'type': 'object', 'properties': {'ip_address': {'title': 'Ip Address', 'type': 'string', 'format': 'ipvanyaddress'}}, 'required': ['ip_address'], } def test_ipv4interface_type(): class Model(BaseModel): ip_interface: IPv4Interface model_schema = Model.schema() assert model_schema == { 'title': 'Model', 'type': 'object', 'properties': {'ip_interface': {'title': 'Ip Interface', 'type': 'string', 'format': 'ipv4interface'}}, 'required': ['ip_interface'], } def test_ipv6interface_type(): class Model(BaseModel): ip_interface: IPv6Interface model_schema = Model.schema() assert model_schema == { 'title': 'Model', 'type': 'object', 'properties': {'ip_interface': {'title': 'Ip Interface', 'type': 'string', 'format': 'ipv6interface'}}, 'required': ['ip_interface'], } def test_ipvanyinterface_type(): class Model(BaseModel): ip_interface: IPvAnyInterface model_schema = Model.schema() assert model_schema == { 'title': 'Model', 'type': 'object', 'properties': {'ip_interface': 
{'title': 'Ip Interface', 'type': 'string', 'format': 'ipvanyinterface'}}, 'required': ['ip_interface'], } def test_ipv4network_type(): class Model(BaseModel): ip_network: IPv4Network model_schema = Model.schema() assert model_schema == { 'title': 'Model', 'type': 'object', 'properties': {'ip_network': {'title': 'Ip Network', 'type': 'string', 'format': 'ipv4network'}}, 'required': ['ip_network'], } def test_ipv6network_type(): class Model(BaseModel): ip_network: IPv6Network model_schema = Model.schema() assert model_schema == { 'title': 'Model', 'type': 'object', 'properties': {'ip_network': {'title': 'Ip Network', 'type': 'string', 'format': 'ipv6network'}}, 'required': ['ip_network'], } def test_ipvanynetwork_type(): class Model(BaseModel): ip_network: IPvAnyNetwork model_schema = Model.schema() assert model_schema == { 'title': 'Model', 'type': 'object', 'properties': {'ip_network': {'title': 'Ip Network', 'type': 'string', 'format': 'ipvanynetwork'}}, 'required': ['ip_network'], } @pytest.mark.parametrize( 'type_,default_value', ( (Callable, ...), (Callable, lambda x: x), (Callable[[int], int], ...), (Callable[[int], int], lambda x: x), ), ) def test_callable_type(type_, default_value): class Model(BaseModel): callback: type_ = default_value foo: int with pytest.warns(UserWarning): model_schema = Model.schema() assert 'callback' not in model_schema['properties'] def test_error_non_supported_types(): class Model(BaseModel): a: PyObject with pytest.raises(ValueError): Model.schema() def create_testing_submodules(): base_path = Path(tempfile.mkdtemp()) mod_root_path = base_path / 'pydantic_schema_test' os.makedirs(mod_root_path, exist_ok=True) open(mod_root_path / '__init__.py', 'w').close() for mod in ['a', 'b', 'c']: module_name = 'module' + mod model_name = 'model' + mod + '.py' os.makedirs(mod_root_path / module_name, exist_ok=True) open(mod_root_path / module_name / '__init__.py', 'w').close() with open(mod_root_path / module_name / model_name, 'w') as f: 
f.write('from pydantic import BaseModel\n' 'class Model(BaseModel):\n' ' a: str\n') module_name = 'moduled' model_name = 'modeld.py' os.makedirs(mod_root_path / module_name, exist_ok=True) open(mod_root_path / module_name / '__init__.py', 'w').close() with open(mod_root_path / module_name / model_name, 'w') as f: f.write('from ..moduleb.modelb import Model') sys.path.insert(0, str(base_path)) def test_flat_models_unique_models(): create_testing_submodules() from pydantic_schema_test.modulea.modela import Model as ModelA from pydantic_schema_test.moduleb.modelb import Model as ModelB from pydantic_schema_test.moduled.modeld import Model as ModelD flat_models = get_flat_models_from_models([ModelA, ModelB, ModelD]) assert flat_models == set([ModelA, ModelB]) def test_flat_models_with_submodels(): class Foo(BaseModel): a: str class Bar(BaseModel): b: List[Foo] class Baz(BaseModel): c: Dict[str, Bar] flat_models = get_flat_models_from_model(Baz) assert flat_models == set([Foo, Bar, Baz]) def test_flat_models_with_submodels_from_sequence(): class Foo(BaseModel): a: str class Bar(BaseModel): b: Foo class Ingredient(BaseModel): name: str class Pizza(BaseModel): name: str ingredients: List[Ingredient] flat_models = get_flat_models_from_models([Bar, Pizza]) assert flat_models == set([Foo, Bar, Ingredient, Pizza]) def test_model_name_maps(): create_testing_submodules() from pydantic_schema_test.modulea.modela import Model as ModelA from pydantic_schema_test.moduleb.modelb import Model as ModelB from pydantic_schema_test.modulec.modelc import Model as ModelC from pydantic_schema_test.moduled.modeld import Model as ModelD class Foo(BaseModel): a: str class Bar(BaseModel): b: Foo class Baz(BaseModel): c: Bar flat_models = get_flat_models_from_models([Baz, ModelA, ModelB, ModelC, ModelD]) model_name_map = get_model_name_map(flat_models) assert model_name_map == { Foo: 'Foo', Bar: 'Bar', Baz: 'Baz', ModelA: 'pydantic_schema_test__modulea__modela__Model', ModelB: 
'pydantic_schema_test__moduleb__modelb__Model', ModelC: 'pydantic_schema_test__modulec__modelc__Model', } def test_schema_overrides(): class Foo(BaseModel): a: str class Bar(BaseModel): b: Foo = Foo(a='foo') class Baz(BaseModel): c: Optional[Bar] class Model(BaseModel): d: Baz model_schema = Model.schema() assert model_schema == { 'title': 'Model', 'type': 'object', 'definitions': { 'Foo': { 'title': 'Foo', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'string'}}, 'required': ['a'], }, 'Bar': { 'title': 'Bar', 'type': 'object', 'properties': {'b': {'title': 'B', 'default': {'a': 'foo'}, 'allOf': [{'$ref': '#/definitions/Foo'}]}}, }, 'Baz': {'title': 'Baz', 'type': 'object', 'properties': {'c': {'$ref': '#/definitions/Bar'}}}, }, 'properties': {'d': {'$ref': '#/definitions/Baz'}}, 'required': ['d'], } def test_schema_overrides_w_union(): class Foo(BaseModel): pass class Bar(BaseModel): pass class Spam(BaseModel): a: Union[Foo, Bar] = Field(..., description='xxx') assert Spam.schema()['properties'] == { 'a': { 'title': 'A', 'description': 'xxx', 'anyOf': [{'$ref': '#/definitions/Foo'}, {'$ref': '#/definitions/Bar'}], }, } def test_schema_from_models(): class Foo(BaseModel): a: str class Bar(BaseModel): b: Foo class Baz(BaseModel): c: Bar class Model(BaseModel): d: Baz class Ingredient(BaseModel): name: str class Pizza(BaseModel): name: str ingredients: List[Ingredient] model_schema = schema( [Model, Pizza], title='Multi-model schema', description='Single JSON Schema with multiple definitions' ) assert model_schema == { 'title': 'Multi-model schema', 'description': 'Single JSON Schema with multiple definitions', 'definitions': { 'Pizza': { 'title': 'Pizza', 'type': 'object', 'properties': { 'name': {'title': 'Name', 'type': 'string'}, 'ingredients': { 'title': 'Ingredients', 'type': 'array', 'items': {'$ref': '#/definitions/Ingredient'}, }, }, 'required': ['name', 'ingredients'], }, 'Ingredient': { 'title': 'Ingredient', 'type': 'object', 'properties': 
{'name': {'title': 'Name', 'type': 'string'}}, 'required': ['name'], }, 'Model': { 'title': 'Model', 'type': 'object', 'properties': {'d': {'$ref': '#/definitions/Baz'}}, 'required': ['d'], }, 'Baz': { 'title': 'Baz', 'type': 'object', 'properties': {'c': {'$ref': '#/definitions/Bar'}}, 'required': ['c'], }, 'Bar': { 'title': 'Bar', 'type': 'object', 'properties': {'b': {'$ref': '#/definitions/Foo'}}, 'required': ['b'], }, 'Foo': { 'title': 'Foo', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'string'}}, 'required': ['a'], }, }, } @pytest.mark.parametrize( 'ref_prefix,ref_template', [ # OpenAPI style ('#/components/schemas/', None), (None, '#/components/schemas/{model}'), # ref_prefix takes priority ('#/components/schemas/', '#/{model}/schemas/'), ], ) def test_schema_with_refs(ref_prefix, ref_template): class Foo(BaseModel): a: str class Bar(BaseModel): b: Foo class Baz(BaseModel): c: Bar model_schema = schema([Bar, Baz], ref_prefix=ref_prefix, ref_template=ref_template) assert model_schema == { 'definitions': { 'Baz': { 'title': 'Baz', 'type': 'object', 'properties': {'c': {'$ref': '#/components/schemas/Bar'}}, 'required': ['c'], }, 'Bar': { 'title': 'Bar', 'type': 'object', 'properties': {'b': {'$ref': '#/components/schemas/Foo'}}, 'required': ['b'], }, 'Foo': { 'title': 'Foo', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'string'}}, 'required': ['a'], }, } } def test_schema_with_custom_ref_template(): class Foo(BaseModel): a: str class Bar(BaseModel): b: Foo class Baz(BaseModel): c: Bar model_schema = schema([Bar, Baz], ref_template='/schemas/{model}.json#/') assert model_schema == { 'definitions': { 'Baz': { 'title': 'Baz', 'type': 'object', 'properties': {'c': {'$ref': '/schemas/Bar.json#/'}}, 'required': ['c'], }, 'Bar': { 'title': 'Bar', 'type': 'object', 'properties': {'b': {'$ref': '/schemas/Foo.json#/'}}, 'required': ['b'], }, 'Foo': { 'title': 'Foo', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'string'}}, 
'required': ['a'], }, } } def test_schema_ref_template_key_error(): class Foo(BaseModel): a: str class Bar(BaseModel): b: Foo class Baz(BaseModel): c: Bar with pytest.raises(KeyError): schema([Bar, Baz], ref_template='/schemas/{bad_name}.json#/') def test_schema_no_definitions(): model_schema = schema([], title='Schema without definitions') assert model_schema == {'title': 'Schema without definitions'} def test_list_default(): class UserModel(BaseModel): friends: List[int] = [1] assert UserModel.schema() == { 'title': 'UserModel', 'type': 'object', 'properties': {'friends': {'title': 'Friends', 'default': [1], 'type': 'array', 'items': {'type': 'integer'}}}, } def test_dict_default(): class UserModel(BaseModel): friends: Dict[str, float] = {'a': 1.1, 'b': 2.2} assert UserModel.schema() == { 'title': 'UserModel', 'type': 'object', 'properties': { 'friends': { 'title': 'Friends', 'default': {'a': 1.1, 'b': 2.2}, 'type': 'object', 'additionalProperties': {'type': 'number'}, } }, } @pytest.mark.parametrize( 'kwargs,type_,expected_extra', [ ({'max_length': 5}, str, {'type': 'string', 'maxLength': 5}), ({}, constr(max_length=6), {'type': 'string', 'maxLength': 6}), ({'min_length': 2}, str, {'type': 'string', 'minLength': 2}), ({'max_length': 5}, bytes, {'type': 'string', 'maxLength': 5, 'format': 'binary'}), ({'regex': '^foo$'}, str, {'type': 'string', 'pattern': '^foo$'}), ({'gt': 2}, int, {'type': 'integer', 'exclusiveMinimum': 2}), ({'lt': 5}, int, {'type': 'integer', 'exclusiveMaximum': 5}), ({'ge': 2}, int, {'type': 'integer', 'minimum': 2}), ({'le': 5}, int, {'type': 'integer', 'maximum': 5}), ({'multiple_of': 5}, int, {'type': 'integer', 'multipleOf': 5}), ({'gt': 2}, float, {'type': 'number', 'exclusiveMinimum': 2}), ({'lt': 5}, float, {'type': 'number', 'exclusiveMaximum': 5}), ({'ge': 2}, float, {'type': 'number', 'minimum': 2}), ({'le': 5}, float, {'type': 'number', 'maximum': 5}), ({'gt': -math.inf}, float, {'type': 'number'}), ({'lt': math.inf}, float, 
{'type': 'number'}), ({'ge': -math.inf}, float, {'type': 'number'}), ({'le': math.inf}, float, {'type': 'number'}), ({'multiple_of': 5}, float, {'type': 'number', 'multipleOf': 5}), ({'gt': 2}, Decimal, {'type': 'number', 'exclusiveMinimum': 2}), ({'lt': 5}, Decimal, {'type': 'number', 'exclusiveMaximum': 5}), ({'ge': 2}, Decimal, {'type': 'number', 'minimum': 2}), ({'le': 5}, Decimal, {'type': 'number', 'maximum': 5}), ({'multiple_of': 5}, Decimal, {'type': 'number', 'multipleOf': 5}), ], ) def test_constraints_schema(kwargs, type_, expected_extra): class Foo(BaseModel): a: type_ = Field('foo', title='A title', description='A description', **kwargs) expected_schema = { 'title': 'Foo', 'type': 'object', 'properties': {'a': {'title': 'A title', 'description': 'A description', 'default': 'foo'}}, } expected_schema['properties']['a'].update(expected_extra) assert Foo.schema() == expected_schema @pytest.mark.parametrize( 'kwargs,type_', [ ({'max_length': 5}, int), ({'min_length': 2}, float), ({'max_length': 5}, Decimal), ({'allow_mutation': False}, bool), ({'regex': '^foo$'}, int), ({'gt': 2}, str), ({'lt': 5}, bytes), ({'ge': 2}, str), ({'le': 5}, bool), ({'gt': 0}, Callable), ({'gt': 0}, Callable[[int], int]), ({'gt': 0}, conlist(int, min_items=4)), ({'gt': 0}, conset(int, min_items=4)), ], ) def test_unenforced_constraints_schema(kwargs, type_): with pytest.raises(ValueError, match='On field "a" the following field constraints are set but not enforced'): class Foo(BaseModel): a: type_ = Field('foo', title='A title', description='A description', **kwargs) @pytest.mark.parametrize( 'kwargs,type_,value', [ ({'max_length': 5}, str, 'foo'), ({'min_length': 2}, str, 'foo'), ({'max_length': 5}, bytes, b'foo'), ({'regex': '^foo$'}, str, 'foo'), ({'gt': 2}, int, 3), ({'lt': 5}, int, 3), ({'ge': 2}, int, 3), ({'ge': 2}, int, 2), ({'gt': 2}, int, '3'), ({'le': 5}, int, 3), ({'le': 5}, int, 5), ({'gt': 2}, float, 3.0), ({'gt': 2}, float, 2.1), ({'lt': 5}, float, 3.0), ({'lt': 
5}, float, 4.9), ({'ge': 2}, float, 3.0), ({'ge': 2}, float, 2.0), ({'le': 5}, float, 3.0), ({'le': 5}, float, 5.0), ({'gt': 2}, float, 3), ({'gt': 2}, float, '3'), ({'gt': 2}, Decimal, Decimal(3)), ({'lt': 5}, Decimal, Decimal(3)), ({'ge': 2}, Decimal, Decimal(3)), ({'ge': 2}, Decimal, Decimal(2)), ({'le': 5}, Decimal, Decimal(3)), ({'le': 5}, Decimal, Decimal(5)), ], ) def test_constraints_schema_validation(kwargs, type_, value): class Foo(BaseModel): a: type_ = Field('foo', title='A title', description='A description', **kwargs) assert Foo(a=value) @pytest.mark.parametrize( 'kwargs,type_,value', [ ({'max_length': 5}, str, 'foobar'), ({'min_length': 2}, str, 'f'), ({'regex': '^foo$'}, str, 'bar'), ({'gt': 2}, int, 2), ({'lt': 5}, int, 5), ({'ge': 2}, int, 1), ({'le': 5}, int, 6), ({'gt': 2}, float, 2.0), ({'lt': 5}, float, 5.0), ({'ge': 2}, float, 1.9), ({'le': 5}, float, 5.1), ({'gt': 2}, Decimal, Decimal(2)), ({'lt': 5}, Decimal, Decimal(5)), ({'ge': 2}, Decimal, Decimal(1)), ({'le': 5}, Decimal, Decimal(6)), ], ) def test_constraints_schema_validation_raises(kwargs, type_, value): class Foo(BaseModel): a: type_ = Field('foo', title='A title', description='A description', **kwargs) with pytest.raises(ValidationError): Foo(a=value) def test_schema_kwargs(): class Foo(BaseModel): a: str = Field('foo', examples=['bar']) assert Foo.schema() == { 'title': 'Foo', 'type': 'object', 'properties': {'a': {'type': 'string', 'title': 'A', 'default': 'foo', 'examples': ['bar']}}, } def test_schema_dict_constr(): regex_str = r'^([a-zA-Z_][a-zA-Z0-9_]*)$' ConStrType = constr(regex=regex_str) ConStrKeyDict = Dict[ConStrType, str] class Foo(BaseModel): a: ConStrKeyDict = {} assert Foo.schema() == { 'title': 'Foo', 'type': 'object', 'properties': { 'a': {'type': 'object', 'title': 'A', 'default': {}, 'patternProperties': {regex_str: {'type': 'string'}}} }, } @pytest.mark.parametrize( 'field_type,expected_schema', [ (ConstrainedBytes, {'title': 'A', 'type': 'string', 'format': 
'binary'}), ( conbytes(min_length=3, max_length=5), {'title': 'A', 'type': 'string', 'format': 'binary', 'minLength': 3, 'maxLength': 5}, ), ], ) def test_bytes_constrained_types(field_type, expected_schema): class Model(BaseModel): a: field_type base_schema = {'title': 'Model', 'type': 'object', 'properties': {'a': {}}, 'required': ['a']} base_schema['properties']['a'] = expected_schema assert Model.schema() == base_schema def test_optional_dict(): class Model(BaseModel): something: Optional[Dict[str, Any]] assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': {'something': {'title': 'Something', 'type': 'object'}}, } assert Model().dict() == {'something': None} assert Model(something={'foo': 'Bar'}).dict() == {'something': {'foo': 'Bar'}} def test_optional_validator(): class Model(BaseModel): something: Optional[str] @validator('something', always=True) def check_something(cls, v): assert v is None or 'x' not in v, 'should not contain x' return v assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': {'something': {'title': 'Something', 'type': 'string'}}, } assert Model().dict() == {'something': None} assert Model(something=None).dict() == {'something': None} assert Model(something='hello').dict() == {'something': 'hello'} def test_field_with_validator(): class Model(BaseModel): something: Optional[int] = None @validator('something') def check_field(cls, v, *, values, config, field): return v assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': {'something': {'type': 'integer', 'title': 'Something'}}, } def test_unparameterized_schema_generation(): class FooList(BaseModel): d: List class BarList(BaseModel): d: list assert model_schema(FooList) == { 'title': 'FooList', 'type': 'object', 'properties': {'d': {'items': {}, 'title': 'D', 'type': 'array'}}, 'required': ['d'], } foo_list_schema = model_schema(FooList) bar_list_schema = model_schema(BarList) bar_list_schema['title'] = 'FooList' # to 
check for equality assert foo_list_schema == bar_list_schema class FooDict(BaseModel): d: Dict class BarDict(BaseModel): d: dict model_schema(Foo) assert model_schema(FooDict) == { 'title': 'FooDict', 'type': 'object', 'properties': {'d': {'title': 'D', 'type': 'object'}}, 'required': ['d'], } foo_dict_schema = model_schema(FooDict) bar_dict_schema = model_schema(BarDict) bar_dict_schema['title'] = 'FooDict' # to check for equality assert foo_dict_schema == bar_dict_schema def test_known_model_optimization(): class Dep(BaseModel): number: int class Model(BaseModel): dep: Dep dep_l: List[Dep] expected = { 'title': 'Model', 'type': 'object', 'properties': { 'dep': {'$ref': '#/definitions/Dep'}, 'dep_l': {'title': 'Dep L', 'type': 'array', 'items': {'$ref': '#/definitions/Dep'}}, }, 'required': ['dep', 'dep_l'], 'definitions': { 'Dep': { 'title': 'Dep', 'type': 'object', 'properties': {'number': {'title': 'Number', 'type': 'integer'}}, 'required': ['number'], } }, } assert Model.schema() == expected def test_root(): class Model(BaseModel): __root__: str assert Model.schema() == {'title': 'Model', 'type': 'string'} def test_root_list(): class Model(BaseModel): __root__: List[str] assert Model.schema() == {'title': 'Model', 'type': 'array', 'items': {'type': 'string'}} def test_root_nested_model(): class NestedModel(BaseModel): a: str class Model(BaseModel): __root__: List[NestedModel] assert Model.schema() == { 'title': 'Model', 'type': 'array', 'items': {'$ref': '#/definitions/NestedModel'}, 'definitions': { 'NestedModel': { 'title': 'NestedModel', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'string'}}, 'required': ['a'], } }, } def test_new_type_schema(): a_type = NewType('a_type', int) b_type = NewType('b_type', a_type) c_type = NewType('c_type', str) class Model(BaseModel): a: a_type b: b_type c: c_type assert Model.schema() == { 'properties': { 'a': {'title': 'A', 'type': 'integer'}, 'b': {'title': 'B', 'type': 'integer'}, 'c': {'title': 'C', 
'type': 'string'}, }, 'required': ['a', 'b', 'c'], 'title': 'Model', 'type': 'object', } @pytest.mark.skipif(not Literal, reason='typing_extensions not installed and python version < 3.8') def test_literal_schema(): class Model(BaseModel): a: Literal[1] b: Literal['a'] c: Literal['a', 1] assert Model.schema() == { 'properties': { 'a': {'title': 'A', 'type': 'integer', 'const': 1}, 'b': {'title': 'B', 'type': 'string', 'const': 'a'}, 'c': {'anyOf': [{'type': 'string', 'const': 'a'}, {'type': 'integer', 'const': 1}], 'title': 'C'}, }, 'required': ['a', 'b', 'c'], 'title': 'Model', 'type': 'object', } def test_color_type(): class Model(BaseModel): color: Color model_schema = Model.schema() assert model_schema == { 'title': 'Model', 'type': 'object', 'properties': {'color': {'title': 'Color', 'type': 'string', 'format': 'color'}}, 'required': ['color'], } def test_model_with_schema_extra(): class Model(BaseModel): a: str class Config: schema_extra = {'examples': [{'a': 'Foo'}]} assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'string'}}, 'required': ['a'], 'examples': [{'a': 'Foo'}], } def test_model_with_schema_extra_callable(): class Model(BaseModel): name: str = None class Config: @staticmethod def schema_extra(schema, model_class): schema.pop('properties') schema['type'] = 'override' assert model_class is Model assert Model.schema() == {'title': 'Model', 'type': 'override'} def test_model_with_schema_extra_callable_no_model_class(): class Model(BaseModel): name: str = None class Config: @staticmethod def schema_extra(schema): schema.pop('properties') schema['type'] = 'override' assert Model.schema() == {'title': 'Model', 'type': 'override'} def test_model_with_schema_extra_callable_classmethod(): class Model(BaseModel): name: str = None class Config: type = 'foo' @classmethod def schema_extra(cls, schema, model_class): schema.pop('properties') schema['type'] = cls.type assert model_class is Model assert 
Model.schema() == {'title': 'Model', 'type': 'foo'} def test_model_with_schema_extra_callable_instance_method(): class Model(BaseModel): name: str = None class Config: def schema_extra(schema, model_class): schema.pop('properties') schema['type'] = 'override' assert model_class is Model assert Model.schema() == {'title': 'Model', 'type': 'override'} def test_model_with_extra_forbidden(): class Model(BaseModel): a: str class Config: extra = Extra.forbid assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'string'}}, 'required': ['a'], 'additionalProperties': False, } @pytest.mark.parametrize( 'annotation,kwargs,field_schema', [ (int, dict(gt=0), {'title': 'A', 'exclusiveMinimum': 0, 'type': 'integer'}), (Optional[int], dict(gt=0), {'title': 'A', 'exclusiveMinimum': 0, 'type': 'integer'}), ( Tuple[int, ...], dict(gt=0), {'title': 'A', 'exclusiveMinimum': 0, 'type': 'array', 'items': {'exclusiveMinimum': 0, 'type': 'integer'}}, ), ( Tuple[int, int, int], dict(gt=0), { 'title': 'A', 'type': 'array', 'items': [ {'exclusiveMinimum': 0, 'type': 'integer'}, {'exclusiveMinimum': 0, 'type': 'integer'}, {'exclusiveMinimum': 0, 'type': 'integer'}, ], }, ), ( Union[int, float], dict(gt=0), { 'title': 'A', 'anyOf': [{'exclusiveMinimum': 0, 'type': 'integer'}, {'exclusiveMinimum': 0, 'type': 'number'}], }, ), ( List[int], dict(gt=0), {'title': 'A', 'exclusiveMinimum': 0, 'type': 'array', 'items': {'exclusiveMinimum': 0, 'type': 'integer'}}, ), ( Dict[str, int], dict(gt=0), { 'title': 'A', 'exclusiveMinimum': 0, 'type': 'object', 'additionalProperties': {'exclusiveMinimum': 0, 'type': 'integer'}, }, ), ( Union[str, int], dict(gt=0, max_length=5), {'title': 'A', 'anyOf': [{'maxLength': 5, 'type': 'string'}, {'exclusiveMinimum': 0, 'type': 'integer'}]}, ), ], ) def test_enforced_constraints(annotation, kwargs, field_schema): class Model(BaseModel): a: annotation = Field(..., **kwargs) schema = Model.schema() # 
debug(schema['properties']['a']) assert schema['properties']['a'] == field_schema def test_real_vs_phony_constraints(): class Model1(BaseModel): foo: int = Field(..., gt=123) class Config: title = 'Test Model' class Model2(BaseModel): foo: int = Field(..., exclusiveMinimum=123) class Config: title = 'Test Model' with pytest.raises(ValidationError, match='ensure this value is greater than 123'): Model1(foo=122) assert Model2(foo=122).dict() == {'foo': 122} assert ( Model1.schema() == Model2.schema() == { 'title': 'Test Model', 'type': 'object', 'properties': {'foo': {'title': 'Foo', 'exclusiveMinimum': 123, 'type': 'integer'}}, 'required': ['foo'], } ) def test_subfield_field_info(): class MyModel(BaseModel): entries: Dict[str, List[int]] assert MyModel.schema() == { 'title': 'MyModel', 'type': 'object', 'properties': { 'entries': { 'title': 'Entries', 'type': 'object', 'additionalProperties': {'type': 'array', 'items': {'type': 'integer'}}, } }, 'required': ['entries'], } def test_dataclass(): @dataclass class Model: a: bool assert schema([Model]) == { 'definitions': { 'Model': { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'boolean'}}, 'required': ['a'], } } } assert model_schema(Model) == { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'boolean'}}, 'required': ['a'], } def test_schema_attributes(): class ExampleEnum(Enum): """This is a test description.""" gt = 'GT' lt = 'LT' ge = 'GE' le = 'LE' max_length = 'ML' multiple_of = 'MO' regex = 'RE' class Example(BaseModel): example: ExampleEnum assert Example.schema() == { 'title': 'Example', 'type': 'object', 'properties': {'example': {'$ref': '#/definitions/ExampleEnum'}}, 'required': ['example'], 'definitions': { 'ExampleEnum': { 'title': 'ExampleEnum', 'description': 'This is a test description.', 'enum': ['GT', 'LT', 'GE', 'LE', 'ML', 'MO', 'RE'], } }, } def test_model_process_schema_enum(): class SpamEnum(str, Enum): foo = 'f' bar = 'b' 
model_schema, _, _ = model_process_schema(SpamEnum, model_name_map={}) assert model_schema == {'title': 'SpamEnum', 'description': 'An enumeration.', 'type': 'string', 'enum': ['f', 'b']} def test_path_modify_schema(): class MyPath(Path): @classmethod def __modify_schema__(cls, schema): schema.update(foobar=123) class Model(BaseModel): path1: Path path2: MyPath path3: List[MyPath] assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': { 'path1': {'title': 'Path1', 'type': 'string', 'format': 'path'}, 'path2': {'title': 'Path2', 'type': 'string', 'format': 'path', 'foobar': 123}, 'path3': {'title': 'Path3', 'type': 'array', 'items': {'type': 'string', 'format': 'path', 'foobar': 123}}, }, 'required': ['path1', 'path2', 'path3'], } def test_frozen_set(): class Model(BaseModel): a: FrozenSet[int] = frozenset({1, 2, 3}) b: FrozenSet = frozenset({1, 2, 3}) c: frozenset = frozenset({1, 2, 3}) d: frozenset = ... assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': { 'a': { 'title': 'A', 'default': frozenset({1, 2, 3}), 'type': 'array', 'items': {'type': 'integer'}, 'uniqueItems': True, }, 'b': {'title': 'B', 'default': frozenset({1, 2, 3}), 'type': 'array', 'items': {}, 'uniqueItems': True}, 'c': {'title': 'C', 'default': frozenset({1, 2, 3}), 'type': 'array', 'items': {}, 'uniqueItems': True}, 'd': {'title': 'D', 'type': 'array', 'items': {}, 'uniqueItems': True}, }, 'required': ['d'], } def test_iterable(): class Model(BaseModel): a: Iterable[int] assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'array', 'items': {'type': 'integer'}}}, 'required': ['a'], } def test_new_type(): new_type = NewType('NewStr', str) class Model(BaseModel): a: new_type assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'string'}}, 'required': ['a'], } def test_multiple_models_with_same_name(create_module): module = create_module( # 
language=Python """ from pydantic import BaseModel class ModelOne(BaseModel): class NestedModel(BaseModel): a: float nested: NestedModel class ModelTwo(BaseModel): class NestedModel(BaseModel): b: float nested: NestedModel class NestedModel(BaseModel): c: float """ ) models = [module.ModelOne, module.ModelTwo, module.NestedModel] model_names = set(schema(models)['definitions'].keys()) expected_model_names = { 'ModelOne', 'ModelTwo', f'{module.__name__}__ModelOne__NestedModel', f'{module.__name__}__ModelTwo__NestedModel', f'{module.__name__}__NestedModel', } assert model_names == expected_model_names def test_multiple_enums_with_same_name(create_module): module_1 = create_module( # language=Python """ from enum import Enum from pydantic import BaseModel class MyEnum(str, Enum): a = 'a' b = 'b' c = 'c' class MyModel(BaseModel): my_enum_1: MyEnum """ ) module_2 = create_module( # language=Python """ from enum import Enum from pydantic import BaseModel class MyEnum(str, Enum): d = 'd' e = 'e' f = 'f' class MyModel(BaseModel): my_enum_2: MyEnum """ ) class Model(BaseModel): my_model_1: module_1.MyModel my_model_2: module_2.MyModel assert len(Model.schema()['definitions']) == 4 assert set(Model.schema()['definitions']) == { f'{module_1.__name__}__MyEnum', f'{module_1.__name__}__MyModel', f'{module_2.__name__}__MyEnum', f'{module_2.__name__}__MyModel', } @pytest.mark.skipif( sys.version_info < (3, 7), reason='schema generation for generic fields is not available in python < 3.7' ) def test_schema_for_generic_field(): T = TypeVar('T') class GenModel(Generic[T]): def __init__(self, data: Any): self.data = data @classmethod def __get_validators__(cls): yield cls.validate @classmethod def validate(cls, v: Any): return v class Model(BaseModel): data: GenModel[str] data1: GenModel assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': { 'data': {'title': 'Data', 'type': 'string'}, 'data1': { 'title': 'Data1', }, }, 'required': ['data', 'data1'], } class 
GenModelModified(GenModel, Generic[T]): @classmethod def __modify_schema__(cls, field_schema): field_schema.pop('type', None) field_schema.update(anyOf=[{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]) class ModelModified(BaseModel): data: GenModelModified[str] data1: GenModelModified assert ModelModified.schema() == { 'title': 'ModelModified', 'type': 'object', 'properties': { 'data': {'title': 'Data', 'anyOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}, 'data1': {'title': 'Data1', 'anyOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}, }, 'required': ['data', 'data1'], }
import math import os import sys import tempfile from datetime import date, datetime, time, timedelta from decimal import Decimal from enum import Enum, IntEnum from ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network from pathlib import Path from typing import ( Any, Callable, Dict, FrozenSet, Generic, Iterable, List, NewType, Optional, Set, Tuple, TypeVar, Union, ) from uuid import UUID import pytest from pydantic import BaseModel, Extra, Field, ValidationError, conlist, conset, validator from pydantic.color import Color from pydantic.dataclasses import dataclass from pydantic.networks import AnyUrl, EmailStr, IPvAnyAddress, IPvAnyInterface, IPvAnyNetwork, NameEmail, stricturl from pydantic.schema import ( get_flat_models_from_model, get_flat_models_from_models, get_model_name_map, model_process_schema, model_schema, schema, ) from pydantic.types import ( UUID1, UUID3, UUID4, UUID5, ConstrainedBytes, ConstrainedDecimal, ConstrainedFloat, ConstrainedInt, ConstrainedStr, DirectoryPath, FilePath, Json, NegativeFloat, NegativeInt, NoneBytes, NoneStr, NoneStrBytes, NonNegativeFloat, NonNegativeInt, NonPositiveFloat, NonPositiveInt, PositiveFloat, PositiveInt, PyObject, SecretBytes, SecretStr, StrBytes, StrictBool, StrictStr, conbytes, condecimal, confloat, conint, constr, ) from pydantic.typing import Literal try: import email_validator except ImportError: email_validator = None def test_key(): class ApplePie(BaseModel): """ This is a test. 
""" a: float b: int = 10 s = { 'type': 'object', 'properties': {'a': {'type': 'number', 'title': 'A'}, 'b': {'type': 'integer', 'title': 'B', 'default': 10}}, 'required': ['a'], 'title': 'ApplePie', 'description': 'This is a test.', } assert ApplePie.__schema_cache__.keys() == set() assert ApplePie.schema() == s assert ApplePie.__schema_cache__.keys() == {(True, '#/definitions/{model}')} assert ApplePie.schema() == s def test_by_alias(): class ApplePie(BaseModel): a: float b: int = 10 class Config: title = 'Apple Pie' fields = {'a': 'Snap', 'b': 'Crackle'} assert ApplePie.schema() == { 'type': 'object', 'title': 'Apple Pie', 'properties': { 'Snap': {'type': 'number', 'title': 'Snap'}, 'Crackle': {'type': 'integer', 'title': 'Crackle', 'default': 10}, }, 'required': ['Snap'], } assert list(ApplePie.schema(by_alias=True)['properties'].keys()) == ['Snap', 'Crackle'] assert list(ApplePie.schema(by_alias=False)['properties'].keys()) == ['a', 'b'] def test_ref_template(): class KeyLimePie(BaseModel): x: str = None class ApplePie(BaseModel): a: float = None key_lime: KeyLimePie = None class Config: title = 'Apple Pie' assert ApplePie.schema(ref_template='foobar/{model}.json') == { 'title': 'Apple Pie', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'number'}, 'key_lime': {'$ref': 'foobar/KeyLimePie.json'}}, 'definitions': { 'KeyLimePie': { 'title': 'KeyLimePie', 'type': 'object', 'properties': {'x': {'title': 'X', 'type': 'string'}}, }, }, } assert ApplePie.schema()['properties']['key_lime'] == {'$ref': '#/definitions/KeyLimePie'} json_schema = ApplePie.schema_json(ref_template='foobar/{model}.json') assert 'foobar/KeyLimePie.json' in json_schema assert '#/definitions/KeyLimePie' not in json_schema def test_by_alias_generator(): class ApplePie(BaseModel): a: float b: int = 10 class Config: @staticmethod def alias_generator(x): return x.upper() assert ApplePie.schema() == { 'title': 'ApplePie', 'type': 'object', 'properties': {'A': {'title': 'A', 'type': 
'number'}, 'B': {'title': 'B', 'default': 10, 'type': 'integer'}}, 'required': ['A'], } assert ApplePie.schema(by_alias=False)['properties'].keys() == {'a', 'b'} def test_sub_model(): class Foo(BaseModel): """hello""" b: float class Bar(BaseModel): a: int b: Foo = None assert Bar.schema() == { 'type': 'object', 'title': 'Bar', 'definitions': { 'Foo': { 'type': 'object', 'title': 'Foo', 'description': 'hello', 'properties': {'b': {'type': 'number', 'title': 'B'}}, 'required': ['b'], } }, 'properties': {'a': {'type': 'integer', 'title': 'A'}, 'b': {'$ref': '#/definitions/Foo'}}, 'required': ['a'], } def test_schema_class(): class Model(BaseModel): foo: int = Field(4, title='Foo is Great') bar: str = Field(..., description='this description of bar') with pytest.raises(ValidationError): Model() m = Model(bar=123) assert m.dict() == {'foo': 4, 'bar': '123'} assert Model.schema() == { 'type': 'object', 'title': 'Model', 'properties': { 'foo': {'type': 'integer', 'title': 'Foo is Great', 'default': 4}, 'bar': {'type': 'string', 'title': 'Bar', 'description': 'this description of bar'}, }, 'required': ['bar'], } def test_schema_repr(): s = Field(4, title='Foo is Great') assert str(s) == "default=4 title='Foo is Great' extra={}" assert repr(s) == "FieldInfo(default=4, title='Foo is Great', extra={})" def test_schema_class_by_alias(): class Model(BaseModel): foo: int = Field(4, alias='foofoo') assert list(Model.schema()['properties'].keys()) == ['foofoo'] assert list(Model.schema(by_alias=False)['properties'].keys()) == ['foo'] def test_choices(): FooEnum = Enum('FooEnum', {'foo': 'f', 'bar': 'b'}) BarEnum = IntEnum('BarEnum', {'foo': 1, 'bar': 2}) class SpamEnum(str, Enum): foo = 'f' bar = 'b' class Model(BaseModel): foo: FooEnum bar: BarEnum spam: SpamEnum = Field(None) assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': { 'foo': {'$ref': '#/definitions/FooEnum'}, 'bar': {'$ref': '#/definitions/BarEnum'}, 'spam': {'$ref': '#/definitions/SpamEnum'}, 
}, 'required': ['foo', 'bar'], 'definitions': { 'FooEnum': {'title': 'FooEnum', 'description': 'An enumeration.', 'enum': ['f', 'b']}, 'BarEnum': {'title': 'BarEnum', 'description': 'An enumeration.', 'type': 'integer', 'enum': [1, 2]}, 'SpamEnum': {'title': 'SpamEnum', 'description': 'An enumeration.', 'type': 'string', 'enum': ['f', 'b']}, }, } def test_enum_modify_schema(): class SpamEnum(str, Enum): foo = 'f' bar = 'b' @classmethod def __modify_schema__(cls, field_schema): field_schema['tsEnumNames'] = [e.name for e in cls] class Model(BaseModel): spam: SpamEnum = Field(None) assert Model.schema() == { 'definitions': { 'SpamEnum': { 'description': 'An enumeration.', 'enum': ['f', 'b'], 'title': 'SpamEnum', 'tsEnumNames': ['foo', 'bar'], 'type': 'string', } }, 'properties': {'spam': {'$ref': '#/definitions/SpamEnum'}}, 'title': 'Model', 'type': 'object', } def test_enum_schema_custom_field(): class FooBarEnum(str, Enum): foo = 'foo' bar = 'bar' class Model(BaseModel): pika: FooBarEnum = Field(alias='pikalias', title='Pikapika!', description='Pika is definitely the best!') bulbi: FooBarEnum = Field('foo', alias='bulbialias', title='Bulbibulbi!', description='Bulbi is not...') cara: FooBarEnum assert Model.schema() == { 'definitions': { 'FooBarEnum': { 'description': 'An enumeration.', 'enum': ['foo', 'bar'], 'title': 'FooBarEnum', 'type': 'string', } }, 'properties': { 'pikalias': { 'allOf': [{'$ref': '#/definitions/FooBarEnum'}], 'description': 'Pika is definitely the best!', 'title': 'Pikapika!', }, 'bulbialias': { 'allOf': [{'$ref': '#/definitions/FooBarEnum'}], 'description': 'Bulbi is not...', 'title': 'Bulbibulbi!', 'default': 'foo', }, 'cara': {'$ref': '#/definitions/FooBarEnum'}, }, 'required': ['pikalias', 'cara'], 'title': 'Model', 'type': 'object', } def test_enum_and_model_have_same_behaviour(): class Names(str, Enum): rick = 'Rick' morty = 'Morty' summer = 'Summer' class Pika(BaseModel): a: str class Foo(BaseModel): enum: Names titled_enum: Names = 
Field( ..., title='Title of enum', description='Description of enum', ) model: Pika titled_model: Pika = Field( ..., title='Title of model', description='Description of model', ) assert Foo.schema() == { 'definitions': { 'Pika': { 'properties': {'a': {'title': 'A', 'type': 'string'}}, 'required': ['a'], 'title': 'Pika', 'type': 'object', }, 'Names': { 'description': 'An enumeration.', 'enum': ['Rick', 'Morty', 'Summer'], 'title': 'Names', 'type': 'string', }, }, 'properties': { 'enum': {'$ref': '#/definitions/Names'}, 'model': {'$ref': '#/definitions/Pika'}, 'titled_enum': { 'allOf': [{'$ref': '#/definitions/Names'}], 'description': 'Description of enum', 'title': 'Title of enum', }, 'titled_model': { 'allOf': [{'$ref': '#/definitions/Pika'}], 'description': 'Description of model', 'title': 'Title of model', }, }, 'required': ['enum', 'titled_enum', 'model', 'titled_model'], 'title': 'Foo', 'type': 'object', } def test_list_enum_schema_extras(): class FoodChoice(str, Enum): spam = 'spam' egg = 'egg' chips = 'chips' class Model(BaseModel): foods: List[FoodChoice] = Field(examples=[['spam', 'egg']]) assert Model.schema() == { 'definitions': { 'FoodChoice': { 'description': 'An enumeration.', 'enum': ['spam', 'egg', 'chips'], 'title': 'FoodChoice', 'type': 'string', } }, 'properties': { 'foods': {'type': 'array', 'items': {'$ref': '#/definitions/FoodChoice'}, 'examples': [['spam', 'egg']]}, }, 'required': ['foods'], 'title': 'Model', 'type': 'object', } def test_json_schema(): class Model(BaseModel): a = b'foobar' b = Decimal('12.34') assert Model.schema_json(indent=2) == ( '{\n' ' "title": "Model",\n' ' "type": "object",\n' ' "properties": {\n' ' "a": {\n' ' "title": "A",\n' ' "default": "foobar",\n' ' "type": "string",\n' ' "format": "binary"\n' ' },\n' ' "b": {\n' ' "title": "B",\n' ' "default": 12.34,\n' ' "type": "number"\n' ' }\n' ' }\n' '}' ) def test_list_sub_model(): class Foo(BaseModel): a: float class Bar(BaseModel): b: List[Foo] assert Bar.schema() == { 
'title': 'Bar', 'type': 'object', 'definitions': { 'Foo': { 'title': 'Foo', 'type': 'object', 'properties': {'a': {'type': 'number', 'title': 'A'}}, 'required': ['a'], } }, 'properties': {'b': {'type': 'array', 'items': {'$ref': '#/definitions/Foo'}, 'title': 'B'}}, 'required': ['b'], } def test_optional(): class Model(BaseModel): a: Optional[str] assert Model.schema() == {'title': 'Model', 'type': 'object', 'properties': {'a': {'type': 'string', 'title': 'A'}}} def test_any(): class Model(BaseModel): a: Any assert Model.schema() == {'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A'}}} def test_set(): class Model(BaseModel): a: Set[int] b: set assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': { 'a': {'title': 'A', 'type': 'array', 'uniqueItems': True, 'items': {'type': 'integer'}}, 'b': {'title': 'B', 'type': 'array', 'items': {}, 'uniqueItems': True}, }, 'required': ['a', 'b'], } def test_const_str(): class Model(BaseModel): a: str = Field('some string', const=True) assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'string', 'const': 'some string'}}, } def test_const_false(): class Model(BaseModel): a: str = Field('some string', const=False) assert Model.schema() == { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'string', 'default': 'some string'}}, } @pytest.mark.parametrize( 'field_type,expected_schema', [ (tuple, {}), ( Tuple[str, int, Union[str, int, float], float], [ {'type': 'string'}, {'type': 'integer'}, {'anyOf': [{'type': 'string'}, {'type': 'integer'}, {'type': 'number'}]}, {'type': 'number'}, ], ), (Tuple[str], {'type': 'string'}), ], ) def test_tuple(field_type, expected_schema): class Model(BaseModel): a: field_type base_schema = { 'title': 'Model', 'type': 'object', 'properties': {'a': {'title': 'A', 'type': 'array'}}, 'required': ['a'], } base_schema['properties']['a']['items'] = expected_schema assert 
Model.schema() == base_schema


def test_bool():
    """A bare bool field schemas as a required JSON boolean."""

    class Model(BaseModel):
        a: bool

    expected = {
        'title': 'Model',
        'type': 'object',
        'properties': {'a': {'title': 'A', 'type': 'boolean'}},
        'required': ['a'],
    }
    assert Model.schema() == expected


def test_strict_bool():
    """StrictBool schemas identically to a plain bool."""

    class Model(BaseModel):
        a: StrictBool

    expected = {
        'title': 'Model',
        'type': 'object',
        'properties': {'a': {'title': 'A', 'type': 'boolean'}},
        'required': ['a'],
    }
    assert Model.schema() == expected


def test_dict():
    """A bare dict field schemas as a required JSON object."""

    class Model(BaseModel):
        a: dict

    expected = {
        'title': 'Model',
        'type': 'object',
        'properties': {'a': {'title': 'A', 'type': 'object'}},
        'required': ['a'],
    }
    assert Model.schema() == expected


def test_list():
    """A bare list field schemas as a required array with unconstrained items."""

    class Model(BaseModel):
        a: list

    expected = {
        'title': 'Model',
        'type': 'object',
        'properties': {'a': {'title': 'A', 'type': 'array', 'items': {}}},
        'required': ['a'],
    }
    assert Model.schema() == expected


class Foo(BaseModel):
    a: float


@pytest.mark.parametrize(
    'field_type,expected_schema',
    [
        (
            Union[int, str],
            {
                'properties': {'a': {'title': 'A', 'anyOf': [{'type': 'integer'}, {'type': 'string'}]}},
                'required': ['a'],
            },
        ),
        (
            List[int],
            {'properties': {'a': {'title': 'A', 'type': 'array', 'items': {'type': 'integer'}}}, 'required': ['a']},
        ),
        (
            Dict[str, Foo],
            {
                'definitions': {
                    'Foo': {
                        'title': 'Foo',
                        'type': 'object',
                        'properties': {'a': {'title': 'A', 'type': 'number'}},
                        'required': ['a'],
                    }
                },
                'properties': {
                    'a': {'title': 'A', 'type': 'object', 'additionalProperties': {'$ref': '#/definitions/Foo'}}
                },
                'required': ['a'],
            },
        ),
        (
            Union[None, Foo],
            {
                'definitions': {
                    'Foo': {
                        'title': 'Foo',
                        'type': 'object',
                        'properties': {'a': {'title': 'A', 'type': 'number'}},
                        'required': ['a'],
                    }
                },
                'properties': {'a': {'$ref': '#/definitions/Foo'}},
            },
        ),
        (Dict[str, Any], {'properties': {'a': {'title': 'A', 'type': 'object'}}, 'required': ['a']}),
    ],
)
def test_list_union_dict(field_type, expected_schema):
    """Container and union annotations produce the expected schema fragments."""

    class Model(BaseModel):
        a: field_type

    # Merge the shared envelope with the case-specific fragment, mirroring dict.update().
    expected = {'title': 'Model', 'type': 'object', **expected_schema}
    assert Model.schema() == expected
@pytest.mark.parametrize(
    'field_type,expected_schema',
    [
        (datetime, {'type': 'string', 'format': 'date-time'}),
        (date, {'type': 'string', 'format': 'date'}),
        (time, {'type': 'string', 'format': 'time'}),
        (timedelta, {'type': 'number', 'format': 'time-delta'}),
    ],
)
def test_date_types(field_type, expected_schema):
    # Date/time types are strings (or number for timedelta) with a 'format' hint.
    class Model(BaseModel):
        a: field_type

    attribute_schema = {'title': 'A'}
    attribute_schema.update(expected_schema)

    base_schema = {'title': 'Model', 'type': 'object', 'properties': {'a': attribute_schema}, 'required': ['a']}

    assert Model.schema() == base_schema


@pytest.mark.parametrize(
    'field_type,expected_schema',
    [
        (NoneStr, {'properties': {'a': {'title': 'A', 'type': 'string'}}}),
        (NoneBytes, {'properties': {'a': {'title': 'A', 'type': 'string', 'format': 'binary'}}}),
        (
            StrBytes,
            {
                'properties': {
                    'a': {'title': 'A', 'anyOf': [{'type': 'string'}, {'type': 'string', 'format': 'binary'}]}
                },
                'required': ['a'],
            },
        ),
        (
            NoneStrBytes,
            {
                'properties': {
                    'a': {'title': 'A', 'anyOf': [{'type': 'string'}, {'type': 'string', 'format': 'binary'}]}
                }
            },
        ),
    ],
)
def test_str_basic_types(field_type, expected_schema):
    class Model(BaseModel):
        a: field_type

    base_schema = {'title': 'Model', 'type': 'object'}
    base_schema.update(expected_schema)
    assert Model.schema() == base_schema


@pytest.mark.parametrize(
    'field_type,expected_schema',
    [
        (StrictStr, {'title': 'A', 'type': 'string'}),
        (ConstrainedStr, {'title': 'A', 'type': 'string'}),
        (
            constr(min_length=3, max_length=5, regex='^text$'),
            {'title': 'A', 'type': 'string', 'minLength': 3, 'maxLength': 5, 'pattern': '^text$'},
        ),
    ],
)
def test_str_constrained_types(field_type, expected_schema):
    # constr() limits map to minLength/maxLength/pattern keywords.
    class Model(BaseModel):
        a: field_type

    model_schema = Model.schema()
    assert model_schema['properties']['a'] == expected_schema

    base_schema = {'title': 'Model', 'type': 'object', 'properties': {'a': expected_schema}, 'required': ['a']}

    assert model_schema == base_schema


@pytest.mark.parametrize(
    'field_type,expected_schema',
    [
        (AnyUrl, {'title': 'A', 'type': 'string', 'format': 'uri', 'minLength': 1, 'maxLength': 2 ** 16}),
        (
            stricturl(min_length=5, max_length=10),
            {'title': 'A', 'type': 'string', 'format': 'uri', 'minLength': 5, 'maxLength': 10},
        ),
    ],
)
def test_special_str_types(field_type, expected_schema):
    class Model(BaseModel):
        a: field_type

    base_schema = {'title': 'Model', 'type': 'object', 'properties': {'a': {}}, 'required': ['a']}
    base_schema['properties']['a'] = expected_schema

    assert Model.schema() == base_schema


@pytest.mark.skipif(not email_validator, reason='email_validator not installed')
@pytest.mark.parametrize('field_type,expected_schema', [(EmailStr, 'email'), (NameEmail, 'name-email')])
def test_email_str_types(field_type, expected_schema):
    class Model(BaseModel):
        a: field_type

    base_schema = {
        'title': 'Model',
        'type': 'object',
        'properties': {'a': {'title': 'A', 'type': 'string'}},
        'required': ['a'],
    }
    base_schema['properties']['a']['format'] = expected_schema
    assert Model.schema() == base_schema


@pytest.mark.parametrize('field_type,inner_type', [(SecretBytes, 'string'), (SecretStr, 'string')])
def test_secret_types(field_type, inner_type):
    # Secret types are writeOnly password-format strings so values never leak into docs.
    class Model(BaseModel):
        a: field_type

    base_schema = {
        'title': 'Model',
        'type': 'object',
        'properties': {'a': {'title': 'A', 'type': inner_type, 'writeOnly': True, 'format': 'password'}},
        'required': ['a'],
    }

    assert Model.schema() == base_schema


@pytest.mark.parametrize(
    'field_type,expected_schema',
    [
        (ConstrainedInt, {}),
        (conint(gt=5, lt=10), {'exclusiveMinimum': 5, 'exclusiveMaximum': 10}),
        (conint(ge=5, le=10), {'minimum': 5, 'maximum': 10}),
        (conint(multiple_of=5), {'multipleOf': 5}),
        (PositiveInt, {'exclusiveMinimum': 0}),
        (NegativeInt, {'exclusiveMaximum': 0}),
        (NonNegativeInt, {'minimum': 0}),
        (NonPositiveInt, {'maximum': 0}),
    ],
)
def test_special_int_types(field_type, expected_schema):
    class Model(BaseModel):
        a: field_type

    base_schema = {
        'title': 'Model',
        'type': 'object',
        'properties': {'a': {'title': 'A', 'type': 'integer'}},
        'required': ['a'],
    }
    base_schema['properties']['a'].update(expected_schema)
    assert Model.schema() == base_schema


@pytest.mark.parametrize(
    'field_type,expected_schema',
    [
        (ConstrainedFloat, {}),
        (confloat(gt=5, lt=10), {'exclusiveMinimum': 5, 'exclusiveMaximum': 10}),
        (confloat(ge=5, le=10), {'minimum': 5, 'maximum': 10}),
        (confloat(multiple_of=5), {'multipleOf': 5}),
        (PositiveFloat, {'exclusiveMinimum': 0}),
        (NegativeFloat, {'exclusiveMaximum': 0}),
        (NonNegativeFloat, {'minimum': 0}),
        (NonPositiveFloat, {'maximum': 0}),
        # Decimal constraints also serialize as JSON 'number'.
        (ConstrainedDecimal, {}),
        (condecimal(gt=5, lt=10), {'exclusiveMinimum': 5, 'exclusiveMaximum': 10}),
        (condecimal(ge=5, le=10), {'minimum': 5, 'maximum': 10}),
        (condecimal(multiple_of=5), {'multipleOf': 5}),
    ],
)
def test_special_float_types(field_type, expected_schema):
    class Model(BaseModel):
        a: field_type

    base_schema = {
        'title': 'Model',
        'type': 'object',
        'properties': {'a': {'title': 'A', 'type': 'number'}},
        'required': ['a'],
    }
    base_schema['properties']['a'].update(expected_schema)
    assert Model.schema() == base_schema


@pytest.mark.parametrize(
    'field_type,expected_schema',
    [(UUID, 'uuid'), (UUID1, 'uuid1'), (UUID3, 'uuid3'), (UUID4, 'uuid4'), (UUID5, 'uuid5')],
)
def test_uuid_types(field_type, expected_schema):
    class Model(BaseModel):
        a: field_type

    base_schema = {
        'title': 'Model',
        'type': 'object',
        'properties': {'a': {'title': 'A', 'type': 'string', 'format': ''}},
        'required': ['a'],
    }
    base_schema['properties']['a']['format'] = expected_schema

    assert Model.schema() == base_schema


@pytest.mark.parametrize(
    'field_type,expected_schema', [(FilePath, 'file-path'), (DirectoryPath, 'directory-path'), (Path, 'path')]
)
def test_path_types(field_type, expected_schema):
    class Model(BaseModel):
        a: field_type

    base_schema = {
        'title': 'Model',
        'type': 'object',
        'properties': {'a': {'title': 'A', 'type': 'string', 'format': ''}},
        'required': ['a'],
    }
    base_schema['properties']['a']['format'] = expected_schema

    assert Model.schema() == base_schema


def test_json_type():
    # Bare Json is a json-string; Json[int] documents the decoded type and is required.
    class Model(BaseModel):
        a: Json
        b: Json[int]

    assert Model.schema() == {
        'title': 'Model',
        'type': 'object',
        'properties': {
            'a': {'title': 'A', 'type': 'string', 'format': 'json-string'},
            'b': {'title': 'B', 'type': 'integer'},
        },
        'required': ['b'],
    }


def test_ipv4address_type():
    class Model(BaseModel):
        ip_address: IPv4Address

    model_schema = Model.schema()
    assert model_schema == {
        'title': 'Model',
        'type': 'object',
        'properties': {'ip_address': {'title': 'Ip Address', 'type': 'string', 'format': 'ipv4'}},
        'required': ['ip_address'],
    }


def test_ipv6address_type():
    class Model(BaseModel):
        ip_address: IPv6Address

    model_schema = Model.schema()
    assert model_schema == {
        'title': 'Model',
        'type': 'object',
        'properties': {'ip_address': {'title': 'Ip Address', 'type': 'string', 'format': 'ipv6'}},
        'required': ['ip_address'],
    }


def test_ipvanyaddress_type():
    class Model(BaseModel):
        ip_address: IPvAnyAddress

    model_schema = Model.schema()
    assert model_schema == {
        'title': 'Model',
        'type': 'object',
        'properties': {'ip_address': {'title': 'Ip Address', 'type': 'string', 'format': 'ipvanyaddress'}},
        'required': ['ip_address'],
    }


def test_ipv4interface_type():
    class Model(BaseModel):
        ip_interface: IPv4Interface

    model_schema = Model.schema()
    assert model_schema == {
        'title': 'Model',
        'type': 'object',
        'properties': {'ip_interface': {'title': 'Ip Interface', 'type': 'string', 'format': 'ipv4interface'}},
        'required': ['ip_interface'],
    }


def test_ipv6interface_type():
    class Model(BaseModel):
        ip_interface: IPv6Interface

    model_schema = Model.schema()
    assert model_schema == {
        'title': 'Model',
        'type': 'object',
        'properties': {'ip_interface': {'title': 'Ip Interface', 'type': 'string', 'format': 'ipv6interface'}},
        'required': ['ip_interface'],
    }


def test_ipvanyinterface_type():
    class Model(BaseModel):
        ip_interface: IPvAnyInterface

    model_schema = Model.schema()
    assert model_schema == {
        'title': 'Model',
        'type': 'object',
        'properties': {'ip_interface': {'title': 'Ip Interface', 'type': 'string', 'format': 'ipvanyinterface'}},
        'required': ['ip_interface'],
    }


def test_ipv4network_type():
    class Model(BaseModel):
        ip_network: IPv4Network

    model_schema = Model.schema()
    assert model_schema == {
        'title': 'Model',
        'type': 'object',
        'properties': {'ip_network': {'title': 'Ip Network', 'type': 'string', 'format': 'ipv4network'}},
        'required': ['ip_network'],
    }


def test_ipv6network_type():
    class Model(BaseModel):
        ip_network: IPv6Network

    model_schema = Model.schema()
    assert model_schema == {
        'title': 'Model',
        'type': 'object',
        'properties': {'ip_network': {'title': 'Ip Network', 'type': 'string', 'format': 'ipv6network'}},
        'required': ['ip_network'],
    }


def test_ipvanynetwork_type():
    class Model(BaseModel):
        ip_network: IPvAnyNetwork

    model_schema = Model.schema()
    assert model_schema == {
        'title': 'Model',
        'type': 'object',
        'properties': {'ip_network': {'title': 'Ip Network', 'type': 'string', 'format': 'ipvanynetwork'}},
        'required': ['ip_network'],
    }


@pytest.mark.parametrize(
    'type_,default_value',
    (
        (Callable, ...),
        (Callable, lambda x: x),
        (Callable[[int], int], ...),
        (Callable[[int], int], lambda x: x),
    ),
)
def test_callable_type(type_, default_value):
    # Callable fields cannot be represented in JSON Schema: a warning is raised
    # and the field is simply omitted from 'properties'.
    class Model(BaseModel):
        callback: type_ = default_value
        foo: int

    with pytest.warns(UserWarning):
        model_schema = Model.schema()

    assert 'callback' not in model_schema['properties']


def test_error_non_supported_types():
    class Model(BaseModel):
        a: PyObject

    with pytest.raises(ValueError):
        Model.schema()


def create_testing_submodules():
    """Create a throwaway 'pydantic_schema_test' package tree on sys.path.

    Layout: modulea/modela.py, moduleb/modelb.py, modulec/modelc.py each define
    a distinct ``Model``; moduled/modeld.py re-exports moduleb's Model.
    NOTE(review): the mkdtemp directory is never removed and sys.path is
    mutated for the rest of the test session — presumably acceptable for tests.
    """
    base_path = Path(tempfile.mkdtemp())
    mod_root_path = base_path / 'pydantic_schema_test'
    os.makedirs(mod_root_path, exist_ok=True)
    open(mod_root_path / '__init__.py', 'w').close()
    for mod in ['a', 'b', 'c']:
        module_name = 'module' + mod
        model_name = 'model' + mod + '.py'
        os.makedirs(mod_root_path / module_name, exist_ok=True)
        open(mod_root_path / module_name / '__init__.py', 'w').close()
        with open(mod_root_path / module_name / model_name, 'w') as f:
            f.write('from pydantic import BaseModel\n' 'class Model(BaseModel):\n' '    a: str\n')
    module_name = 'moduled'
    model_name = 'modeld.py'
    os.makedirs(mod_root_path / module_name, exist_ok=True)
    open(mod_root_path / module_name / '__init__.py', 'w').close()
    with open(mod_root_path / module_name / model_name, 'w') as f:
        f.write('from ..moduleb.modelb import Model')
    sys.path.insert(0, str(base_path))


def test_flat_models_unique_models():
    # ModelD is a re-export of ModelB, so it must be deduplicated.
    create_testing_submodules()
    from pydantic_schema_test.modulea.modela import Model as ModelA
    from pydantic_schema_test.moduleb.modelb import Model as ModelB
    from pydantic_schema_test.moduled.modeld import Model as ModelD

    flat_models = get_flat_models_from_models([ModelA, ModelB, ModelD])
    assert flat_models == set([ModelA, ModelB])


def test_flat_models_with_submodels():
    class Foo(BaseModel):
        a: str

    class Bar(BaseModel):
        b: List[Foo]

    class Baz(BaseModel):
        c: Dict[str, Bar]

    flat_models = get_flat_models_from_model(Baz)
    assert flat_models == set([Foo, Bar, Baz])


def test_flat_models_with_submodels_from_sequence():
    class Foo(BaseModel):
        a: str

    class Bar(BaseModel):
        b: Foo

    class Ingredient(BaseModel):
        name: str

    class Pizza(BaseModel):
        name: str
        ingredients: List[Ingredient]

    flat_models = get_flat_models_from_models([Bar, Pizza])
    assert flat_models == set([Foo, Bar, Ingredient, Pizza])


def test_model_name_maps():
    # Name clashes between modules are resolved by prefixing the dotted module path.
    create_testing_submodules()
    from pydantic_schema_test.modulea.modela import Model as ModelA
    from pydantic_schema_test.moduleb.modelb import Model as ModelB
    from pydantic_schema_test.modulec.modelc import Model as ModelC
    from pydantic_schema_test.moduled.modeld import Model as ModelD

    class Foo(BaseModel):
        a: str

    class Bar(BaseModel):
        b: Foo

    class Baz(BaseModel):
        c: Bar

    flat_models = get_flat_models_from_models([Baz, ModelA, ModelB, ModelC, ModelD])
    model_name_map = get_model_name_map(flat_models)
    # ModelD deduplicates to ModelB, hence no separate entry for it.
    assert model_name_map == {
        Foo: 'Foo',
        Bar: 'Bar',
        Baz: 'Baz',
        ModelA: 'pydantic_schema_test__modulea__modela__Model',
        ModelB: 'pydantic_schema_test__moduleb__modelb__Model',
        ModelC: 'pydantic_schema_test__modulec__modelc__Model',
    }


def test_schema_overrides():
    # A submodel field with a default is wrapped in allOf so the default survives.
    class Foo(BaseModel):
        a: str

    class Bar(BaseModel):
        b: Foo = Foo(a='foo')

    class Baz(BaseModel):
        c: Optional[Bar]

    class Model(BaseModel):
        d: Baz

    model_schema = Model.schema()
    assert model_schema == {
        'title': 'Model',
        'type': 'object',
        'definitions': {
            'Foo': {
                'title': 'Foo',
                'type': 'object',
                'properties': {'a': {'title': 'A', 'type': 'string'}},
                'required': ['a'],
            },
            'Bar': {
                'title': 'Bar',
                'type': 'object',
                'properties': {'b': {'title': 'B', 'default': {'a': 'foo'}, 'allOf': [{'$ref': '#/definitions/Foo'}]}},
            },
            'Baz': {'title': 'Baz', 'type': 'object', 'properties': {'c': {'$ref': '#/definitions/Bar'}}},
        },
        'properties': {'d': {'$ref': '#/definitions/Baz'}},
        'required': ['d'],
    }


def test_schema_overrides_w_union():
    # Field-level overrides (description) sit alongside the anyOf of refs.
    class Foo(BaseModel):
        pass

    class Bar(BaseModel):
        pass

    class Spam(BaseModel):
        a: Union[Foo, Bar] = Field(..., description='xxx')

    assert Spam.schema()['properties'] == {
        'a': {
            'title': 'A',
            'description': 'xxx',
            'anyOf': [{'$ref': '#/definitions/Foo'}, {'$ref': '#/definitions/Bar'}],
        },
    }


def test_schema_from_models():
    # schema() over several root models puts everything under 'definitions'.
    class Foo(BaseModel):
        a: str

    class Bar(BaseModel):
        b: Foo

    class Baz(BaseModel):
        c: Bar

    class Model(BaseModel):
        d: Baz

    class Ingredient(BaseModel):
        name: str

    class Pizza(BaseModel):
        name: str
        ingredients: List[Ingredient]

    model_schema = schema(
        [Model, Pizza], title='Multi-model schema', description='Single JSON Schema with multiple definitions'
    )
    assert model_schema == {
        'title': 'Multi-model schema',
        'description': 'Single JSON Schema with multiple definitions',
        'definitions': {
            'Pizza': {
                'title': 'Pizza',
                'type': 'object',
                'properties': {
                    'name': {'title': 'Name', 'type': 'string'},
                    'ingredients': {
                        'title': 'Ingredients',
                        'type': 'array',
                        'items': {'$ref': '#/definitions/Ingredient'},
                    },
                },
                'required': ['name', 'ingredients'],
            },
            'Ingredient': {
                'title': 'Ingredient',
                'type': 'object',
                'properties': {'name': {'title': 'Name', 'type': 'string'}},
                'required': ['name'],
            },
            'Model': {
                'title': 'Model',
                'type': 'object',
                'properties': {'d': {'$ref': '#/definitions/Baz'}},
                'required': ['d'],
            },
            'Baz': {
                'title': 'Baz',
                'type': 'object',
                'properties': {'c': {'$ref': '#/definitions/Bar'}},
                'required': ['c'],
            },
            'Bar': {
                'title': 'Bar',
                'type': 'object',
                'properties': {'b': {'$ref': '#/definitions/Foo'}},
                'required': ['b'],
            },
            'Foo': {
                'title': 'Foo',
                'type': 'object',
                'properties': {'a': {'title': 'A', 'type': 'string'}},
                'required': ['a'],
            },
        },
    }


@pytest.mark.parametrize(
    'ref_prefix,ref_template',
    [
        # OpenAPI style
        ('#/components/schemas/', None),
        (None, '#/components/schemas/{model}'),
        # ref_prefix takes priority
        ('#/components/schemas/', '#/{model}/schemas/'),
    ],
)
def test_schema_with_refs(ref_prefix, ref_template):
    class Foo(BaseModel):
        a: str

    class Bar(BaseModel):
        b: Foo

    class Baz(BaseModel):
        c: Bar

    model_schema = schema([Bar, Baz], ref_prefix=ref_prefix, ref_template=ref_template)
    assert model_schema == {
        'definitions': {
            'Baz': {
                'title': 'Baz',
                'type': 'object',
                'properties': {'c': {'$ref': '#/components/schemas/Bar'}},
                'required': ['c'],
            },
            'Bar': {
                'title': 'Bar',
                'type': 'object',
                'properties': {'b': {'$ref': '#/components/schemas/Foo'}},
                'required': ['b'],
            },
            'Foo': {
                'title': 'Foo',
                'type': 'object',
                'properties': {'a': {'title': 'A', 'type': 'string'}},
                'required': ['a'],
            },
        }
    }


def test_schema_with_custom_ref_template():
    # '{model}' in ref_template is substituted with each model's mapped name.
    class Foo(BaseModel):
        a: str

    class Bar(BaseModel):
        b: Foo

    class Baz(BaseModel):
        c: Bar

    model_schema = schema([Bar, Baz], ref_template='/schemas/{model}.json#/')
    assert model_schema == {
        'definitions': {
            'Baz': {
                'title': 'Baz',
                'type': 'object',
                'properties': {'c': {'$ref': '/schemas/Bar.json#/'}},
                'required': ['c'],
            },
            'Bar': {
                'title': 'Bar',
                'type': 'object',
                'properties': {'b': {'$ref': '/schemas/Foo.json#/'}},
                'required': ['b'],
            },
            'Foo': {
                'title': 'Foo',
                'type': 'object',
                'properties': {'a': {'title': 'A', 'type': 'string'}},
                'required': ['a'],
            },
        }
    }


def test_schema_ref_template_key_error():
    # Any placeholder other than '{model}' must raise KeyError at format time.
    class Foo(BaseModel):
        a: str

    class Bar(BaseModel):
        b: Foo

    class Baz(BaseModel):
        c: Bar

    with pytest.raises(KeyError):
        schema([Bar, Baz], ref_template='/schemas/{bad_name}.json#/')


def test_schema_no_definitions():
    model_schema = schema([], title='Schema without definitions')
    assert model_schema == {'title': 'Schema without definitions'}


def test_list_default():
    class UserModel(BaseModel):
        friends: List[int] = [1]

    assert UserModel.schema() == {
        'title': 'UserModel',
        'type': 'object',
        'properties': {'friends': {'title': 'Friends', 'default': [1], 'type': 'array', 'items': {'type': 'integer'}}},
    }


def test_dict_default():
    class UserModel(BaseModel):
        friends: Dict[str, float] = {'a': 1.1, 'b': 2.2}

    assert UserModel.schema() == {
        'title': 'UserModel',
        'type': 'object',
        'properties': {
            'friends': {
                'title': 'Friends',
                'default': {'a': 1.1, 'b': 2.2},
                'type': 'object',
                'additionalProperties': {'type': 'number'},
            }
        },
    }


@pytest.mark.parametrize(
    'kwargs,type_,expected_extra',
    [
        ({'max_length': 5}, str, {'type': 'string', 'maxLength': 5}),
        ({}, constr(max_length=6), {'type': 'string', 'maxLength': 6}),
        ({'min_length': 2}, str, {'type': 'string', 'minLength': 2}),
        ({'max_length': 5}, bytes, {'type': 'string', 'maxLength': 5, 'format': 'binary'}),
        ({'regex': '^foo$'}, str, {'type': 'string', 'pattern': '^foo$'}),
        ({'gt': 2}, int, {'type': 'integer', 'exclusiveMinimum': 2}),
        ({'lt': 5}, int, {'type': 'integer', 'exclusiveMaximum': 5}),
        ({'ge': 2}, int, {'type': 'integer', 'minimum': 2}),
        ({'le': 5}, int, {'type': 'integer', 'maximum': 5}),
        ({'multiple_of': 5}, int, {'type': 'integer', 'multipleOf': 5}),
        ({'gt': 2}, float, {'type': 'number', 'exclusiveMinimum': 2}),
        ({'lt': 5}, float, {'type': 'number', 'exclusiveMaximum': 5}),
        ({'ge': 2}, float, {'type': 'number', 'minimum': 2}),
        ({'le': 5}, float, {'type': 'number', 'maximum': 5}),
        # Infinite bounds are vacuous and must be dropped from the schema.
        ({'gt': -math.inf}, float, {'type': 'number'}),
        ({'lt': math.inf}, float, {'type': 'number'}),
        ({'ge': -math.inf}, float, {'type': 'number'}),
        ({'le': math.inf}, float, {'type': 'number'}),
        ({'multiple_of': 5}, float, {'type': 'number', 'multipleOf': 5}),
        ({'gt': 2}, Decimal, {'type': 'number', 'exclusiveMinimum': 2}),
        ({'lt': 5}, Decimal, {'type': 'number', 'exclusiveMaximum': 5}),
        ({'ge': 2}, Decimal, {'type': 'number', 'minimum': 2}),
        ({'le': 5}, Decimal, {'type': 'number', 'maximum': 5}),
        ({'multiple_of': 5}, Decimal, {'type': 'number', 'multipleOf': 5}),
    ],
)
def test_constraints_schema(kwargs, type_, expected_extra):
    class Foo(BaseModel):
        a: type_ = Field('foo', title='A title', description='A description', **kwargs)

    expected_schema = {
        'title': 'Foo',
        'type': 'object',
        'properties': {'a': {'title': 'A title', 'description': 'A description', 'default': 'foo'}},
    }

    expected_schema['properties']['a'].update(expected_extra)
    assert Foo.schema() == expected_schema


@pytest.mark.parametrize(
    'kwargs,type_',
    [
        # Constraints that the given type cannot enforce must be rejected at class creation.
        ({'max_length': 5}, int),
        ({'min_length': 2}, float),
        ({'max_length': 5}, Decimal),
        ({'allow_mutation': False}, bool),
        ({'regex': '^foo$'}, int),
        ({'gt': 2}, str),
        ({'lt': 5}, bytes),
        ({'ge': 2}, str),
        ({'le': 5}, bool),
        ({'gt': 0}, Callable),
        ({'gt': 0}, Callable[[int], int]),
        ({'gt': 0}, conlist(int, min_items=4)),
        ({'gt': 0}, conset(int, min_items=4)),
    ],
)
def test_unenforced_constraints_schema(kwargs, type_):
    with pytest.raises(ValueError, match='On field "a" the following field constraints are set but not enforced'):

        class Foo(BaseModel):
            a: type_ = Field('foo', title='A title', description='A description', **kwargs)


@pytest.mark.parametrize(
    'kwargs,type_,value',
    [
        ({'max_length': 5}, str, 'foo'),
        ({'min_length': 2}, str, 'foo'),
        ({'max_length': 5}, bytes, b'foo'),
        ({'regex': '^foo$'}, str, 'foo'),
        ({'gt': 2}, int, 3),
        ({'lt': 5}, int, 3),
        ({'ge': 2}, int, 3),
        ({'ge': 2}, int, 2),
        ({'gt': 2}, int, '3'),
        ({'le': 5}, int, 3),
        ({'le': 5}, int, 5),
        ({'gt': 2}, float, 3.0),
        ({'gt': 2}, float, 2.1),
        ({'lt': 5}, float, 3.0),
        ({'lt': 5}, float, 4.9),
        ({'ge': 2}, float, 3.0),
        ({'ge': 2}, float, 2.0),
        ({'le': 5}, float, 3.0),
        ({'le': 5}, float, 5.0),
        ({'gt': 2}, float, 3),
        ({'gt': 2}, float, '3'),
        ({'gt': 2}, Decimal, Decimal(3)),
        ({'lt': 5}, Decimal, Decimal(3)),
        ({'ge': 2}, Decimal, Decimal(3)),
        ({'ge': 2}, Decimal, Decimal(2)),
        ({'le': 5}, Decimal, Decimal(3)),
        ({'le': 5}, Decimal, Decimal(5)),
    ],
)
def test_constraints_schema_validation(kwargs, type_, value):
    class Foo(BaseModel):
        a: type_ = Field('foo', title='A title', description='A description', **kwargs)

    assert Foo(a=value)


@pytest.mark.parametrize(
    'kwargs,type_,value',
    [
        ({'max_length': 5}, str, 'foobar'),
        ({'min_length': 2}, str, 'f'),
        ({'regex': '^foo$'}, str, 'bar'),
        ({'gt': 2}, int, 2),
        ({'lt': 5}, int, 5),
        ({'ge': 2}, int, 1),
        ({'le': 5}, int, 6),
        ({'gt': 2}, float, 2.0),
        ({'lt': 5}, float, 5.0),
        ({'ge': 2}, float, 1.9),
        ({'le': 5}, float, 5.1),
        ({'gt': 2}, Decimal, Decimal(2)),
        ({'lt': 5}, Decimal, Decimal(5)),
        ({'ge': 2}, Decimal, Decimal(1)),
        ({'le': 5}, Decimal, Decimal(6)),
    ],
)
def test_constraints_schema_validation_raises(kwargs, type_, value):
    class Foo(BaseModel):
        a: type_ = Field('foo', title='A title', description='A description', **kwargs)

    with pytest.raises(ValidationError):
        Foo(a=value)


def test_schema_kwargs():
    # Unknown Field kwargs (e.g. 'examples') pass straight through into the schema.
    class Foo(BaseModel):
        a: str = Field('foo', examples=['bar'])

    assert Foo.schema() == {
        'title': 'Foo',
        'type': 'object',
        'properties': {'a': {'type': 'string', 'title': 'A', 'default': 'foo', 'examples': ['bar']}},
    }


def test_schema_dict_constr():
    # A constrained-str dict key becomes a 'patternProperties' entry.
    regex_str = r'^([a-zA-Z_][a-zA-Z0-9_]*)$'
    ConStrType = constr(regex=regex_str)
    ConStrKeyDict = Dict[ConStrType, str]

    class Foo(BaseModel):
        a: ConStrKeyDict = {}

    assert Foo.schema() == {
        'title': 'Foo',
        'type': 'object',
        'properties': {
            'a': {'type': 'object', 'title': 'A', 'default': {}, 'patternProperties': {regex_str: {'type': 'string'}}}
        },
    }


@pytest.mark.parametrize(
    'field_type,expected_schema',
    [
        (ConstrainedBytes, {'title': 'A', 'type': 'string', 'format': 'binary'}),
        (
            conbytes(min_length=3, max_length=5),
            {'title': 'A', 'type': 'string', 'format': 'binary', 'minLength': 3, 'maxLength': 5},
        ),
    ],
)
def test_bytes_constrained_types(field_type, expected_schema):
    class Model(BaseModel):
        a: field_type

    base_schema = {'title': 'Model', 'type': 'object', 'properties': {'a': {}}, 'required': ['a']}
    base_schema['properties']['a'] = expected_schema

    assert Model.schema() == base_schema


def test_optional_dict():
    class Model(BaseModel):
        something: Optional[Dict[str, Any]]

    assert Model.schema() == {
        'title': 'Model',
        'type': 'object',
        'properties': {'something': {'title': 'Something', 'type': 'object'}},
    }

    assert Model().dict() == {'something': None}
    assert Model(something={'foo': 'Bar'}).dict() == {'something': {'foo': 'Bar'}}


def test_optional_validator():
    # A validator on an Optional field must not change its schema representation.
    class Model(BaseModel):
        something: Optional[str]

        @validator('something', always=True)
        def check_something(cls, v):
            assert v is None or 'x' not in v, 'should not contain x'
            return v

    assert Model.schema() == {
        'title': 'Model',
        'type': 'object',
        'properties': {'something': {'title': 'Something', 'type': 'string'}},
    }

    assert Model().dict() == {'something': None}
    assert Model(something=None).dict() == {'something': None}
    assert Model(something='hello').dict() == {'something': 'hello'}


def test_field_with_validator():
    class Model(BaseModel):
        something: Optional[int] = None

        @validator('something')
        def check_field(cls, v, *, values, config, field):
            return v

    assert Model.schema() == {
        'title': 'Model',
        'type': 'object',
        'properties': {'something': {'type': 'integer', 'title': 'Something'}},
    }


def test_unparameterized_schema_generation():
    # Bare List/Dict and lowercase list/dict must yield identical schemas (title aside).
    class FooList(BaseModel):
        d: List

    class BarList(BaseModel):
        d: list

    assert model_schema(FooList) == {
        'title': 'FooList',
        'type': 'object',
        'properties': {'d': {'items': {}, 'title': 'D', 'type': 'array'}},
        'required': ['d'],
    }

    foo_list_schema = model_schema(FooList)
    bar_list_schema = model_schema(BarList)
    bar_list_schema['title'] = 'FooList'  # to check for equality
    assert foo_list_schema == bar_list_schema

    class FooDict(BaseModel):
        d: Dict

    class BarDict(BaseModel):
        d: dict

    # NOTE(review): result discarded — looks like a stray call; confirm intent.
    model_schema(Foo)
    assert model_schema(FooDict) == {
        'title': 'FooDict',
        'type': 'object',
        'properties': {'d': {'title': 'D', 'type': 'object'}},
        'required': ['d'],
    }

    foo_dict_schema = model_schema(FooDict)
    bar_dict_schema = model_schema(BarDict)
    bar_dict_schema['title'] = 'FooDict'  # to check for equality
    assert foo_dict_schema == bar_dict_schema


def test_known_model_optimization():
    # A model referenced twice still produces a single definition, referenced via $ref.
    class Dep(BaseModel):
        number: int

    class Model(BaseModel):
        dep: Dep
        dep_l: List[Dep]

    expected = {
        'title': 'Model',
        'type': 'object',
        'properties': {
            'dep': {'$ref': '#/definitions/Dep'},
            'dep_l': {'title': 'Dep L', 'type': 'array', 'items': {'$ref': '#/definitions/Dep'}},
        },
        'required': ['dep', 'dep_l'],
        'definitions': {
            'Dep': {
                'title': 'Dep',
                'type': 'object',
                'properties': {'number': {'title': 'Number', 'type': 'integer'}},
                'required': ['number'],
            }
        },
    }

    assert Model.schema() == expected


def test_root():
    # A custom-root model's schema is the root type's schema, not an object wrapper.
    class Model(BaseModel):
        __root__: str

    assert Model.schema() == {'title': 'Model', 'type': 'string'}


def test_root_list():
    class Model(BaseModel):
        __root__: List[str]

    assert Model.schema() == {'title': 'Model', 'type': 'array', 'items': {'type': 'string'}}


def test_root_nested_model():
    class NestedModel(BaseModel):
        a: str

    class Model(BaseModel):
        __root__: List[NestedModel]

    assert Model.schema() == {
        'title': 'Model',
        'type': 'array',
        'items': {'$ref': '#/definitions/NestedModel'},
        'definitions': {
            'NestedModel': {
                'title': 'NestedModel',
                'type': 'object',
                'properties': {'a': {'title': 'A', 'type': 'string'}},
                'required': ['a'],
            }
        },
    }


def test_new_type_schema():
    # NewType chains unwrap to their ultimate supertype.
    a_type = NewType('a_type', int)
    b_type = NewType('b_type', a_type)
    c_type = NewType('c_type', str)

    class Model(BaseModel):
        a: a_type
        b: b_type
        c: c_type

    assert Model.schema() == {
        'properties': {
            'a': {'title': 'A', 'type': 'integer'},
            'b': {'title': 'B', 'type': 'integer'},
            'c': {'title': 'C', 'type': 'string'},
        },
        'required': ['a', 'b', 'c'],
        'title': 'Model',
        'type': 'object',
    }


@pytest.mark.skipif(not Literal, reason='typing_extensions not installed and python version < 3.8')
def test_literal_schema():
    # Single literals map to 'const'; multi-value literals become an anyOf of consts.
    class Model(BaseModel):
        a: Literal[1]
        b: Literal['a']
        c: Literal['a', 1]

    assert Model.schema() == {
        'properties': {
            'a': {'title': 'A', 'type': 'integer', 'const': 1},
            'b': {'title': 'B', 'type': 'string', 'const': 'a'},
            'c': {'anyOf': [{'type': 'string', 'const': 'a'}, {'type': 'integer', 'const': 1}], 'title': 'C'},
        },
        'required': ['a', 'b', 'c'],
        'title': 'Model',
        'type': 'object',
    }


def test_color_type():
    class Model(BaseModel):
        color: Color

    model_schema = Model.schema()
    assert model_schema == {
        'title': 'Model',
        'type': 'object',
        'properties': {'color': {'title': 'Color', 'type': 'string', 'format': 'color'}},
        'required': ['color'],
    }


def test_model_with_schema_extra():
    # Config.schema_extra as a dict is merged into the generated schema.
    class Model(BaseModel):
        a: str

        class Config:
            schema_extra = {'examples': [{'a': 'Foo'}]}

    assert Model.schema() == {
        'title': 'Model',
        'type': 'object',
        'properties': {'a': {'title': 'A', 'type': 'string'}},
        'required': ['a'],
        'examples': [{'a': 'Foo'}],
    }


def test_model_with_schema_extra_callable():
    # A two-argument callable receives (schema, model_class) and mutates in place.
    class Model(BaseModel):
        name: str = None

        class Config:
            @staticmethod
            def schema_extra(schema, model_class):
                schema.pop('properties')
                schema['type'] = 'override'
                assert model_class is Model

    assert Model.schema() == {'title': 'Model', 'type': 'override'}


def test_model_with_schema_extra_callable_no_model_class():
    # A one-argument callable receives only the schema dict.
    class Model(BaseModel):
        name: str = None

        class Config:
            @staticmethod
            def schema_extra(schema):
                schema.pop('properties')
                schema['type'] = 'override'

    assert Model.schema() == {'title': 'Model', 'type': 'override'}


def test_model_with_schema_extra_callable_classmethod():
    class Model(BaseModel):
        name: str = None

        class Config:
            type = 'foo'

            @classmethod
            def schema_extra(cls, schema, model_class):
                schema.pop('properties')
                schema['type'] = cls.type
                assert model_class is Model

    assert Model.schema() == {'title': 'Model', 'type': 'foo'}


def test_model_with_schema_extra_callable_instance_method():
    # A plain function on Config is also accepted (schema passed as first arg).
    class Model(BaseModel):
        name: str = None

        class Config:
            def schema_extra(schema, model_class):
                schema.pop('properties')
                schema['type'] = 'override'
                assert model_class is Model

    assert Model.schema() == {'title': 'Model', 'type': 'override'}


def test_model_with_extra_forbidden():
    class Model(BaseModel):
        a: str

        class Config:
            extra = Extra.forbid

    assert Model.schema() == {
        'title': 'Model',
        'type': 'object',
        'properties': {'a': {'title': 'A', 'type': 'string'}},
        'required': ['a'],
        'additionalProperties': False,
    }


@pytest.mark.parametrize(
    'annotation,kwargs,field_schema',
    [
        (int, dict(gt=0), {'title': 'A', 'exclusiveMinimum': 0, 'type': 'integer'}),
        (Optional[int], dict(gt=0), {'title': 'A', 'exclusiveMinimum': 0, 'type': 'integer'}),
        (
            Tuple[int, ...],
            dict(gt=0),
            {'title': 'A', 'exclusiveMinimum': 0, 'type': 'array', 'items': {'exclusiveMinimum': 0, 'type': 'integer'}},
        ),
        (
            Tuple[int, int, int],
            dict(gt=0),
            {
                'title': 'A',
                'type': 'array',
                'items': [
                    {'exclusiveMinimum': 0, 'type': 'integer'},
                    {'exclusiveMinimum': 0, 'type': 'integer'},
                    {'exclusiveMinimum': 0, 'type': 'integer'},
                ],
            },
        ),
        (
            Union[int, float],
            dict(gt=0),
            {
                'title': 'A',
                'anyOf': [{'exclusiveMinimum': 0, 'type': 'integer'}, {'exclusiveMinimum': 0, 'type': 'number'}],
            },
        ),
        (
            List[int],
            dict(gt=0),
            {'title': 'A', 'exclusiveMinimum': 0, 'type': 'array', 'items': {'exclusiveMinimum': 0, 'type': 'integer'}},
        ),
        (
            Dict[str, int],
            dict(gt=0),
            {
                'title': 'A',
                'exclusiveMinimum': 0,
                'type': 'object',
                'additionalProperties': {'exclusiveMinimum': 0, 'type': 'integer'},
            },
        ),
        (
            Union[str, int],
            dict(gt=0, max_length=5),
            {'title': 'A', 'anyOf': [{'maxLength': 5, 'type': 'string'}, {'exclusiveMinimum': 0, 'type': 'integer'}]},
        ),
    ],
)
def test_enforced_constraints(annotation, kwargs, field_schema):
    # Constraints propagate into container item/value schemas where enforceable.
    class Model(BaseModel):
        a: annotation = Field(..., **kwargs)

    schema = Model.schema()
    # debug(schema['properties']['a'])
    assert schema['properties']['a'] == field_schema


def test_real_vs_phony_constraints():
    # 'exclusiveMinimum' as a raw Field kwarg is schema-only: documented but not validated.
    class Model1(BaseModel):
        foo: int = Field(..., gt=123)

        class Config:
            title = 'Test Model'

    class Model2(BaseModel):
        foo: int = Field(..., exclusiveMinimum=123)

        class Config:
            title = 'Test Model'

    with pytest.raises(ValidationError, match='ensure this value is greater than 123'):
        Model1(foo=122)

    assert Model2(foo=122).dict() == {'foo': 122}

    assert (
        Model1.schema()
        == Model2.schema()
        == {
            'title': 'Test Model',
            'type': 'object',
            'properties': {'foo': {'title': 'Foo', 'exclusiveMinimum': 123, 'type': 'integer'}},
            'required': ['foo'],
        }
    )


def test_subfield_field_info():
    class MyModel(BaseModel):
        entries: Dict[str, List[int]]

    assert MyModel.schema() == {
        'title': 'MyModel',
        'type': 'object',
        'properties': {
            'entries': {
                'title': 'Entries',
                'type': 'object',
                'additionalProperties': {'type': 'array', 'items': {'type': 'integer'}},
            }
        },
        'required': ['entries'],
    }


def test_dataclass():
    @dataclass
    class Model:
        a: bool

    assert schema([Model]) == {
        'definitions': {
            'Model': {
                'title': 'Model',
                'type': 'object',
                'properties': {'a': {'title': 'A', 'type': 'boolean'}},
                'required': ['a'],
            }
        }
    }

    assert model_schema(Model) == {
        'title': 'Model',
        'type': 'object',
        'properties': {'a': {'title': 'A', 'type': 'boolean'}},
        'required': ['a'],
    }


def test_schema_attributes():
    # Enum member names deliberately collide with constraint keywords to prove
    # they don't leak into schema generation.
    class ExampleEnum(Enum):
        """This is a test description."""

        gt = 'GT'
        lt = 'LT'
        ge = 'GE'
        le = 'LE'
        max_length = 'ML'
        multiple_of = 'MO'
        regex = 'RE'

    class Example(BaseModel):
        example: ExampleEnum

    assert Example.schema() == {
        'title': 'Example',
        'type': 'object',
        'properties': {'example': {'$ref': '#/definitions/ExampleEnum'}},
        'required': ['example'],
        'definitions': {
            'ExampleEnum': {
                'title': 'ExampleEnum',
                'description': 'This is a test description.',
                'enum': ['GT', 'LT', 'GE', 'LE', 'ML', 'MO', 'RE'],
            }
        },
    }


def test_model_process_schema_enum():
    # str-based enums get 'type': 'string' plus the default enum docstring.
    class SpamEnum(str, Enum):
        foo = 'f'
        bar = 'b'

    model_schema, _, _ = model_process_schema(SpamEnum, model_name_map={})
    assert model_schema == {'title': 'SpamEnum', 'description': 'An enumeration.', 'type': 'string', 'enum': ['f', 'b']}


def test_path_modify_schema():
    # __modify_schema__ hooks apply both at field level and inside container items.
    class MyPath(Path):
        @classmethod
        def __modify_schema__(cls, schema):
            schema.update(foobar=123)

    class Model(BaseModel):
        path1: Path
        path2: MyPath
        path3: List[MyPath]

    assert Model.schema() == {
        'title': 'Model',
        'type': 'object',
        'properties': {
            'path1': {'title': 'Path1', 'type': 'string', 'format': 'path'},
            'path2': {'title': 'Path2', 'type': 'string', 'format': 'path', 'foobar': 123},
            'path3': {'title': 'Path3', 'type': 'array', 'items': {'type': 'string', 'format': 'path', 'foobar': 123}},
        },
        'required': ['path1', 'path2', 'path3'],
    }


def test_frozen_set():
    class Model(BaseModel):
        a: FrozenSet[int] = frozenset({1, 2, 3})
        b: FrozenSet = frozenset({1, 2, 3})
        c: frozenset = frozenset({1, 2, 3})
        d: frozenset = ...

    assert Model.schema() == {
        'title': 'Model',
        'type': 'object',
        'properties': {
            'a': {
                'title': 'A',
                'default': frozenset({1, 2, 3}),
                'type': 'array',
                'items': {'type': 'integer'},
                'uniqueItems': True,
            },
            'b': {'title': 'B', 'default': frozenset({1, 2, 3}), 'type': 'array', 'items': {}, 'uniqueItems': True},
            'c': {'title': 'C', 'default': frozenset({1, 2, 3}), 'type': 'array', 'items': {}, 'uniqueItems': True},
            'd': {'title': 'D', 'type': 'array', 'items': {}, 'uniqueItems': True},
        },
        'required': ['d'],
    }


def test_iterable():
    class Model(BaseModel):
        a: Iterable[int]

    assert Model.schema() == {
        'title': 'Model',
        'type': 'object',
        'properties': {'a': {'title': 'A', 'type': 'array', 'items': {'type': 'integer'}}},
        'required': ['a'],
    }


def test_new_type():
    new_type = NewType('NewStr', str)

    class Model(BaseModel):
        a: new_type

    assert Model.schema() == {
        'title': 'Model',
        'type': 'object',
        'properties': {'a': {'title': 'A', 'type': 'string'}},
        'required': ['a'],
    }


def test_multiple_models_with_same_name(create_module):
    # Same-named nested models from one module get module-path-qualified definition keys.
    module = create_module(
        # language=Python
        """
from pydantic import BaseModel


class ModelOne(BaseModel):
    class NestedModel(BaseModel):
        a: float

    nested: NestedModel


class ModelTwo(BaseModel):
    class NestedModel(BaseModel):
        b: float

    nested: NestedModel


class NestedModel(BaseModel):
    c: float
"""
    )

    models = [module.ModelOne, module.ModelTwo, module.NestedModel]
    model_names = set(schema(models)['definitions'].keys())
    expected_model_names = {
        'ModelOne',
        'ModelTwo',
        f'{module.__name__}__ModelOne__NestedModel',
        f'{module.__name__}__ModelTwo__NestedModel',
        f'{module.__name__}__NestedModel',
    }
    assert model_names == expected_model_names


def test_multiple_enums_with_same_name(create_module):
    # Enums, like models, are disambiguated by module path when names collide.
    module_1 = create_module(
        # language=Python
        """
from enum import Enum

from pydantic import BaseModel


class MyEnum(str, Enum):
    a = 'a'
    b = 'b'
    c = 'c'


class MyModel(BaseModel):
    my_enum_1: MyEnum
"""
    )
    module_2 = create_module(
        # language=Python
        """
from enum import Enum

from pydantic import BaseModel


class MyEnum(str, Enum):
    d = 'd'
    e = 'e'
    f = 'f'


class MyModel(BaseModel):
    my_enum_2: MyEnum
"""
    )

    class Model(BaseModel):
        my_model_1: module_1.MyModel
        my_model_2: module_2.MyModel

    assert len(Model.schema()['definitions']) == 4
    assert set(Model.schema()['definitions']) == {
        f'{module_1.__name__}__MyEnum',
        f'{module_1.__name__}__MyModel',
        f'{module_2.__name__}__MyEnum',
        f'{module_2.__name__}__MyModel',
    }


@pytest.mark.skipif(
    sys.version_info < (3, 7), reason='schema generation for generic fields is not available in python < 3.7'
)
def test_schema_for_generic_field():
    T = TypeVar('T')

    class GenModel(Generic[T]):
        def __init__(self, data: Any):
            self.data = data

        @classmethod
        def __get_validators__(cls):
            yield cls.validate

        @classmethod
        def validate(cls, v: Any):
            return v

    class Model(BaseModel):
        data: GenModel[str]
        data1: GenModel

    # Parameterized GenModel[str] picks up the type argument; bare GenModel is untyped.
    assert Model.schema() == {
        'title': 'Model',
        'type': 'object',
        'properties': {
            'data': {'title': 'Data', 'type': 'string'},
            'data1': {
                'title': 'Data1',
            },
        },
        'required': ['data', 'data1'],
    }

    class GenModelModified(GenModel, Generic[T]):
        @classmethod
        def __modify_schema__(cls, field_schema):
            field_schema.pop('type', None)
            field_schema.update(anyOf=[{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}])

    class ModelModified(BaseModel):
        data: GenModelModified[str]
        data1: GenModelModified

    # __modify_schema__ overrides win over the inferred type-argument schema.
    assert ModelModified.schema() == {
        'title': 'ModelModified',
        'type': 'object',
        'properties': {
            'data': {'title': 'Data', 'anyOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]},
            'data1': {'title': 'Data1', 'anyOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]},
        },
        'required': ['data', 'data1'],
    }
"""
Wolfram|Alpha Adapter
is an adapter which is used to fetch results from the
Wolfram ALpha server

MARKS LICENSE AND ATTRIBUTION"Wolfram|Alpha Marks" means the trade names,
trademarks, service marks, logos, domain names and other distinctive marks of
Wolfram|Alpha. Wolfram|Alpha grants You a non-exclusive license to use the
Wolfram|Alpha Marks solely in connection with their display on or through the
API Client as delivered by Wolfram|Alpha. Your API Client shall provide proper
attribution to Wolfram|Alpha whenever such content is displayed or accessed by
providing the end user with a direct link to the specific Wolfram|Alpha result
page from which the content was derived. Wolfram|Alpha may terminate Your
license to use the Wolfram|Alpha Marks at any time for any or no reason. You
shall not at any time challenge or assist others to challenge Wolfram|Alpha
Marks or their registration (except to the extent You cannot give up that
right by law) or to register any trademarks, marks, domains or trade names
obviously similar, in Wolfram|Alpha's discretion, to those of Wolfram|Alpha.
This prohibition survives any termination or expiration of this Agreement.

LINKINGUnless part of a written agreement to the contrary, You are required to
provide a conspicuous hyperlink directly to the corresponding results page of
the Wolfram|Alpha website (http://www.wolframalpha.com) on every page with
Results.
"""
import os

import requests

from sugaroid.core.base_adapters import SugaroidLogicAdapter
from sugaroid.sugaroid import SugaroidStatement, sugaroid_logger
from sugaroid.brain.ooo import Emotion


class WolframAlphaAdapter(SugaroidLogicAdapter):
    """
    Wolfram Alpha Adapter for Sugaroid
    """

    def can_process(self, statement: SugaroidStatement):
        """
        Decide whether this adapter should handle ``statement``.

        Fires for question-style tokens (why/who/when/...), the explicit
        ``$wolf`` command, or any token containing a digit — but only when the
        WOLFRAM_ALPHA_API environment variable is set, and never for
        personal/opinion-directed sentences (you/me/your/...), which other
        adapters handle.
        """
        contains_numbers = False
        for i in statement.simple:
            if any((j.isdigit() for j in i)):
                contains_numbers = True
        return (
            (
                "why" in statement.simple
                or "who" in statement.simple
                or "int" in statement.simple
                or "when" in statement.simple
                or "which" in statement.simple
                or "where" in statement.simple
                or "how" in statement.simple
                or "$wolf" in statement.simple
                or contains_numbers
            )
            and os.getenv("WOLFRAM_ALPHA_API")
            and not (
                "you" in statement.simple
                or "favorite" in statement.simple
                or "favourite" in statement.simple
                or "me" in statement.simple
                or "like" in statement.simple
                or "your" in statement.simple
                or "what" in statement.simple
                or "him" in statement.simple
                or "her" in statement.simple
                or "she" in statement.simple
                or "he" in statement.simple
                or "them" in statement.simple
                or "i" in statement.simple
            )
        )

    def process(
        self,
        statement: SugaroidStatement,
        additional_response_selection_parameters=None,
    ):
        """
        Query the Wolfram|Alpha Full Results API and format the answer.

        Returns a SugaroidStatement containing the formatted result (HTML-ish
        markup when the client supports rich text / media), or a low-confidence
        fallback statement when the query was unsuccessful.

        FIX(review): the f-strings below previously reused the enclosing quote
        character inside the replacement field (e.g. ``f"... {" ".join(rest)}"``),
        which is a SyntaxError on every Python before 3.12 (PEP 701). Quoting
        has been corrected to match the working duplicate of this module.
        """
        wolf_command = False
        # NOTE(review): user_requests_text is currently never read — presumably
        # a planned "force plaintext" flag; confirm before removing.
        user_requests_text = False
        supports_media = self.chatbot.globals["media"]
        rich_text = self.chatbot.globals["rich"]
        if "$wolf" in statement.simple:
            # this is a command type wolfram alpha request
            wolf_command = True
            statement.simple.remove("$wolf")
        if "$text" in statement.simple:
            user_requests_text = True
            statement.simple.remove("$text")

        url = (
            "https://api.wolframalpha.com/v2/query?"
            "input={query}"
            "&format={format}&output=JSON&appid={appid}"
        )
        url = url.format(
            query="+".join(statement.simple),
            appid=os.getenv("WOLFRAM_ALPHA_API", "DEMO"),
            format="image,plaintext" if supports_media else "plaintext",
        )
        sugaroid_logger.info(f"WolframAlpha endpoint: {url}")
        response = requests.get(url, headers={"Accept": "application/json"}).json()
        if not response["queryresult"]["success"]:
            # Unsuccessful query: surface Wolfram's "tips" text if present,
            # with low confidence so other adapters can win — unless the user
            # explicitly asked via $wolf.
            confidence = 0.3
            try:
                text = response["queryresult"]["tips"]["text"]
            except KeyError:
                text = "Wolfram Alpha didnt send back a response"
                confidence = 0
            if wolf_command:
                confidence = 1
            selected_statement = SugaroidStatement(text, chatbot=True)
            selected_statement.confidence = confidence
            selected_statement.emotion = Emotion.positive
            return selected_statement

        information = []
        for i in response["queryresult"]["pods"]:
            for j in i["subpods"]:
                if j["plaintext"]:
                    plaintext_answer = j["plaintext"].split("\n")
                    for ans in plaintext_answer:
                        # Wolfram uses '|' as a key/value separator in tables.
                        splitted_ans = ans.split("|")
                        # FIX(review): previously logged the literal string
                        # "splitted_ans" instead of the value.
                        sugaroid_logger.info("splitted_ans: %s", splitted_ans)
                        if len(splitted_ans) == 1:
                            front = splitted_ans[0]
                            back, rest = "", ""
                        elif len(splitted_ans) == 2:
                            front, back = splitted_ans
                            rest = ""
                        else:
                            front, back, rest = (
                                splitted_ans[0],
                                splitted_ans[1],
                                splitted_ans[2:],
                            )
                        if rich_text:
                            if not back:
                                information.append(f"<b>{front}</b>")
                            else:
                                information.append(
                                    f"<b>{front}</b>: {back} {' '.join(rest)}"
                                )
                        else:
                            if not back:
                                information.append(f"{front}")
                            else:
                                information.append(f"{front}: {back} {' '.join(rest)}")
        if supports_media:
            # Append image-only subpods as <sugaroid:img> markup.
            information.append("<sugaroid:br>")
            for i in response["queryresult"]["pods"]:
                for j in i["subpods"]:
                    if not j.get("plaintext") and j.get("img") and j["img"].get("src"):
                        information.append(
                            f'<sugaroid:img>{j["img"]["src"]}<sugaroid:br>'
                        )
        # Required attribution per the Wolfram|Alpha API terms (see module docstring).
        information.append("Results powered by Wolfram|Alpha (wolframalpha.com)")
        interpretation = "\n".join(information)
        selected_statement = SugaroidStatement(interpretation, chatbot=True)
        selected_statement.set_confidence(1)
        selected_statement.set_emotion(Emotion.lol)
        return selected_statement
"""
Wolfram|Alpha Adapter
is an adapter which is used to fetch results from the
Wolfram ALpha server

MARKS LICENSE AND ATTRIBUTION"Wolfram|Alpha Marks" means the trade names,
trademarks, service marks, logos, domain names and other distinctive marks of
Wolfram|Alpha. Wolfram|Alpha grants You a non-exclusive license to use the
Wolfram|Alpha Marks solely in connection with their display on or through the
API Client as delivered by Wolfram|Alpha. Your API Client shall provide proper
attribution to Wolfram|Alpha whenever such content is displayed or accessed by
providing the end user with a direct link to the specific Wolfram|Alpha result
page from which the content was derived. Wolfram|Alpha may terminate Your
license to use the Wolfram|Alpha Marks at any time for any or no reason. You
shall not at any time challenge or assist others to challenge Wolfram|Alpha
Marks or their registration (except to the extent You cannot give up that
right by law) or to register any trademarks, marks, domains or trade names
obviously similar, in Wolfram|Alpha's discretion, to those of Wolfram|Alpha.
This prohibition survives any termination or expiration of this Agreement.

LINKINGUnless part of a written agreement to the contrary, You are required to
provide a conspicuous hyperlink directly to the corresponding results page of
the Wolfram|Alpha website (http://www.wolframalpha.com) on every page with
Results.
"""
# NOTE(review): this module appears TWICE in this file (an earlier copy of the
# same adapter precedes it); the two copies should be deduplicated.
import os

import requests

from sugaroid.core.base_adapters import SugaroidLogicAdapter
from sugaroid.sugaroid import SugaroidStatement, sugaroid_logger
from sugaroid.brain.ooo import Emotion


class WolframAlphaAdapter(SugaroidLogicAdapter):
    """
    Wolfram Alpha Adapter for Sugaroid
    """

    def can_process(self, statement: SugaroidStatement):
        # Fires for question-style tokens, the "$wolf" command, or any token
        # containing a digit — but only when a WOLFRAM_ALPHA_API key is set and
        # the sentence is not personal/opinion-directed (handled elsewhere).
        contains_numbers = False
        for i in statement.simple:
            if any((j.isdigit() for j in i)):
                contains_numbers = True
        return (
            (
                "why" in statement.simple
                or "who" in statement.simple
                or "int" in statement.simple
                or "when" in statement.simple
                or "which" in statement.simple
                or "where" in statement.simple
                or "how" in statement.simple
                or "$wolf" in statement.simple
                or contains_numbers
            )
            and os.getenv("WOLFRAM_ALPHA_API")
            and not (
                "you" in statement.simple
                or "favorite" in statement.simple
                or "favourite" in statement.simple
                or "me" in statement.simple
                or "like" in statement.simple
                or "your" in statement.simple
                or "what" in statement.simple
                or "him" in statement.simple
                or "her" in statement.simple
                or "she" in statement.simple
                or "he" in statement.simple
                or "them" in statement.simple
                or "i" in statement.simple
            )
        )

    def process(
        self,
        statement: SugaroidStatement,
        additional_response_selection_parameters=None,
    ):
        # Queries the Wolfram|Alpha Full Results API (JSON output) and formats
        # pod/subpod plaintext — plus image markup when media is supported —
        # into a single SugaroidStatement.
        wolf_command = False
        # NOTE(review): user_requests_text is assigned but never read.
        user_requests_text = False
        supports_media = self.chatbot.globals["media"]
        rich_text = self.chatbot.globals["rich"]
        if "$wolf" in statement.simple:
            # this is a command type wolfram alpha request
            wolf_command = True
            statement.simple.remove("$wolf")
        if "$text" in statement.simple:
            user_requests_text = True
            statement.simple.remove("$text")

        url = (
            "https://api.wolframalpha.com/v2/query?"
            "input={query}"
            "&format={format}&output=JSON&appid={appid}"
        )
        url = url.format(
            query="+".join(statement.simple),
            appid=os.getenv("WOLFRAM_ALPHA_API", "DEMO"),
            format="image,plaintext" if supports_media else "plaintext",
        )
        sugaroid_logger.info(f"WolframAlpha endpoint: {url}")
        response = requests.get(url, headers={"Accept": "application/json"}).json()
        if not response["queryresult"]["success"]:
            # Unsuccessful query: return Wolfram's "tips" text (if any) with
            # low confidence, unless the user explicitly used $wolf.
            confidence = 0.3
            try:
                text = response["queryresult"]["tips"]["text"]
            except KeyError:
                text = "Wolfram Alpha didnt send back a response"
                confidence = 0
            if wolf_command:
                confidence = 1
            selected_statement = SugaroidStatement(text, chatbot=True)
            selected_statement.confidence = confidence
            selected_statement.emotion = Emotion.positive
            return selected_statement

        information = []
        for i in response["queryresult"]["pods"]:
            for j in i["subpods"]:
                if j["plaintext"]:
                    plaintext_answer = j["plaintext"].split("\n")
                    for ans in plaintext_answer:
                        # Wolfram uses '|' as a key/value separator in tables.
                        splitted_ans = ans.split("|")
                        # NOTE(review): logs the literal string, not the value —
                        # probably meant to log splitted_ans itself.
                        sugaroid_logger.info("splitted_ans")
                        if len(splitted_ans) == 1:
                            front = splitted_ans[0]
                            back, rest = "", ""
                        elif len(splitted_ans) == 2:
                            front, back = splitted_ans
                            rest = ""
                        else:
                            front, back, rest = (
                                splitted_ans[0],
                                splitted_ans[1],
                                splitted_ans[2:],
                            )
                        if rich_text:
                            if not back:
                                information.append(f"<b>{front}</b>")
                            else:
                                information.append(
                                    f"<b>{front}</b>: {back} {' '.join(rest)}"
                                )
                        else:
                            if not back:
                                information.append(f"{front}")
                            else:
                                information.append(f"{front}: {back} {' '.join(rest)}")
        if supports_media:
            # Append image-only subpods as <sugaroid:img> markup.
            information.append("<sugaroid:br>")
            for i in response["queryresult"]["pods"]:
                for j in i["subpods"]:
                    if not j.get("plaintext") and j.get("img") and j["img"].get("src"):
                        information.append(
                            f'<sugaroid:img>{j["img"]["src"]}<sugaroid:br>'
                        )
        # Required attribution per the Wolfram|Alpha API terms (see module docstring).
        information.append("Results powered by Wolfram|Alpha (wolframalpha.com)")
        interpretation = "\n".join(information)
        selected_statement = SugaroidStatement(interpretation, chatbot=True)
        selected_statement.set_confidence(1)
        selected_statement.set_emotion(Emotion.lol)
        return selected_statement
#!/usr/bin/env python """ Wrapper classes for providing a minimal consistent interface to cheminformatics toolkits Currently supported toolkits: * The `OpenEye Toolkit <https://docs.eyesopen.com/toolkits/python/quickstart-python/index.html>`_ * The `RDKit <http://www.rdkit.org/>`_ * `AmberTools <http://ambermd.org/AmberTools.php>`_ .. todo:: * Add checks at the beginning of each toolkit method call to make sure toolkit is licened * Switch toolkit methods to object methods instead of static methods * Should this be under ``openff.toolkit.utils.toolkits`` or ``openff.toolkit.toolkits``? * Add singleton global toolkit registry that registers all available toolkits by default when this file is imported * Add description fields for each toolkit wrapper * Eliminate global variables in favor of a singleton pattern * Change global variables from _INSTALLED to _AVAILABLE """ __all__ = [ "DEFAULT_AROMATICITY_MODEL", "ALLOWED_AROMATICITY_MODELS", "DEFAULT_FRACTIONAL_BOND_ORDER_MODEL", "ALLOWED_FRACTIONAL_BOND_ORDER_MODELS", "DEFAULT_CHARGE_MODEL", "ALLOWED_CHARGE_MODELS", "LicenseError", "MissingPackageError", "ToolkitUnavailableException", "InvalidToolkitError", "InvalidToolkitRegistryError", "UndefinedStereochemistryError", "GAFFAtomTypeWarning", "ToolkitWrapper", "BuiltInToolkitWrapper", "OpenEyeToolkitWrapper", "RDKitToolkitWrapper", "AmberToolsToolkitWrapper", "BuiltInToolkitWrapper", "ToolkitRegistry", "GLOBAL_TOOLKIT_REGISTRY", "OPENEYE_AVAILABLE", "RDKIT_AVAILABLE", "AMBERTOOLS_AVAILABLE", "BASIC_CHEMINFORMATICS_TOOLKITS", ] # ============================================================================================= # GLOBAL IMPORTS # ============================================================================================= import copy import importlib import inspect import itertools import logging import re import subprocess import tempfile from collections import defaultdict from distutils.spawn import find_executable from functools import wraps from typing 
import TYPE_CHECKING, List, Optional, Tuple import numpy as np from simtk import unit from openff.toolkit.utils.utils import ( MessageException, all_subclasses, inherit_docstrings, temporary_cd, ) if TYPE_CHECKING: from openforcefield.topology.molecule import Molecule # ============================================================================================= # CONFIGURE LOGGER # ============================================================================================= logger = logging.getLogger(__name__) # ============================================================================================= # SUPPORTED MODELS # # TODO: We may no longer need these since we now require SMIRNOFF to specify these models explicitly. # ============================================================================================= DEFAULT_AROMATICITY_MODEL = "OEAroModel_MDL" # TODO: Is there a more specific name and reference for the aromaticity model? ALLOWED_AROMATICITY_MODELS = ["OEAroModel_MDL"] DEFAULT_FRACTIONAL_BOND_ORDER_MODEL = "Wiberg" # TODO: Is there a more specific name and reference for the fractional bond order models? ALLOWED_FRACTIONAL_BOND_ORDER_MODELS = ["Wiberg"] DEFAULT_CHARGE_MODEL = "AM1-BCC" # TODO: Should this be `AM1-BCC`, or should we encode BCCs explicitly via AM1-CM2 preprocessing? ALLOWED_CHARGE_MODELS = ["AM1-BCC"] # TODO: Which models do we want to support? # ============================================================================================= # Exceptions # ============================================================================================= class MissingPackageError(MessageException): """This function requires a package that is not installed.""" class ToolkitUnavailableException(MessageException): """The requested toolkit is unavailable.""" # TODO: Allow toolkit to be specified and used in formatting/printing exception. 
class LicenseError(ToolkitUnavailableException):
    """This function requires a license that cannot be found."""


class InvalidToolkitError(MessageException):
    """A non-toolkit object was received when a toolkit object was expected"""


class InvalidToolkitRegistryError(MessageException):
    """An object other than a ToolkitRegistry or toolkit wrapper was received"""


class UndefinedStereochemistryError(MessageException):
    """A molecule was attempted to be loaded with undefined stereochemistry"""


class GAFFAtomTypeWarning(RuntimeWarning):
    """A warning raised if a loaded mol2 file possibly uses GAFF atom types."""


class ChargeMethodUnavailableError(MessageException):
    """A toolkit does not support the requested partial_charge_method combination"""


class IncorrectNumConformersError(MessageException):
    """The requested partial_charge_method expects a different number of conformers than was provided"""


class IncorrectNumConformersWarning(Warning):
    """The requested partial_charge_method expects a different number of conformers than was provided"""


class ChargeCalculationError(MessageException):
    """An unhandled error occured in an external toolkit during charge calculation"""


class InvalidIUPACNameError(MessageException):
    """Failed to parse IUPAC name"""


class AntechamberNotFoundError(MessageException):
    """The antechamber executable was not found"""


# =============================================================================================
# TOOLKIT UTILITY DECORATORS
# =============================================================================================

# =============================================================================================
# UTILITY FUNCTIONS
# =============================================================================================

# =============================================================================================
# CHEMINFORMATICS TOOLKIT WRAPPERS
#
============================================================================================= class ToolkitWrapper: """ Toolkit wrapper base class. .. warning :: This API is experimental and subject to change. """ _is_available = None # True if toolkit is available _toolkit_version = None _toolkit_name = None # Name of the toolkit _toolkit_installation_instructions = ( None # Installation instructions for the toolkit ) # @staticmethod # TODO: Right now, to access the class definition, I have to make this a classmethod # and thereby call it with () on the outermost decorator. Is this wasting time? Are we caching # the is_available results? @classmethod def requires_toolkit(cls): # remember cls is a ToolkitWrapper subclass here def decorator(func): @wraps(func) def wrapped_function(*args, **kwargs): if not cls.is_available(): msg = "This function requires the {} toolkit".format( cls._toolkit_name ) raise ToolkitUnavailableException(msg) value = func(*args, **kwargs) return value return wrapped_function return decorator @property # @classmethod def toolkit_name(self): """ Return the name of the toolkit wrapped by this class as a str .. warning :: This API is experimental and subject to change. Returns ------- toolkit_name : str The name of the wrapped toolkit """ return self.__class__._toolkit_name @property # @classmethod def toolkit_installation_instructions(self): """ Instructions on how to install the wrapped toolkit. """ return self._toolkit_installation_instructions # @classmethod @property def toolkit_file_read_formats(self): """ List of file formats that this toolkit can read. """ return self._toolkit_file_read_formats # @classmethod @property def toolkit_file_write_formats(self): """ List of file formats that this toolkit can write. 
""" return self._toolkit_file_write_formats @classmethod def is_available(cls): """ Check whether the corresponding toolkit can be imported Returns ------- is_installed : bool True if corresponding toolkit is installed, False otherwise. """ return NotImplementedError @property def toolkit_version(self): """ Return the version of the wrapped toolkit as a str .. warning :: This API is experimental and subject to change. Returns ------- toolkit_version : str The version of the wrapped toolkit """ return self._toolkit_version def from_file(self, file_path, file_format, allow_undefined_stereo=False): """ Return an openff.toolkit.topology.Molecule from a file using this toolkit. Parameters ---------- file_path : str The file to read the molecule from file_format : str Format specifier, usually file suffix (eg. 'MOL2', 'SMI') Note that not all toolkits support all formats. Check ToolkitWrapper.toolkit_file_read_formats for details. allow_undefined_stereo : bool, default=False If false, raises an exception if any molecules contain undefined stereochemistry. _cls : class Molecule constructor Returns ------- molecules : Molecule or list of Molecules a list of Molecule objects is returned. """ return NotImplementedError def from_file_obj( self, file_obj, file_format, allow_undefined_stereo=False, _cls=None ): """ Return an openff.toolkit.topology.Molecule from a file-like object (an object with a ".read()" method using this toolkit. Parameters ---------- file_obj : file-like object The file-like object to read the molecule from file_format : str Format specifier, usually file suffix (eg. 'MOL2', 'SMI') Note that not all toolkits support all formats. Check ToolkitWrapper.toolkit_file_read_formats for details. allow_undefined_stereo : bool, default=False If false, raises an exception if any molecules contain undefined stereochemistry. If false, the function skips loading the molecule. 
_cls : class Molecule constructor Returns ------- molecules : Molecule or list of Molecules a list of Molecule objects is returned. """ return NotImplementedError def _check_n_conformers( self, molecule, partial_charge_method, min_confs=None, max_confs=None, strict_n_conformers=False, ): """ Private method for validating the number of conformers on a molecule prior to partial charge calculation Parameters ---------- molecule : Molecule Molecule for which partial charges are to be computed partial_charge_method : str, optional, default=None The name of the charge method being used min_confs : int, optional, default=None The minimum number of conformers required to use this charge method max_confs : int, optional, default=None The maximum number of conformers required to use this charge method strict_n_conformers : bool, default=False Whether to raise an exception if an invalid number of conformers is provided. If this is False and an invalid number of conformers is found, a warning will be raised. Raises ------ IncorrectNumConformersError If the wrong number of conformers is attached to the input molecule, and strict_n_conformers is True. """ import warnings n_confs = molecule.n_conformers wrong_confs_msg = ( f"Molecule '{molecule}' has {n_confs} conformers, " f"but charge method '{partial_charge_method}' expects" ) exception_suffix = ( "You can disable this error by setting `strict_n_conformers=False' " "when calling 'molecule.assign_partial_charges'." ) # If there's no n_confs filter, then this molecule automatically passes if min_confs is None and max_confs is None: return # If there's constraints on both ends, check both limits elif min_confs is not None and max_confs is not None: if not (min_confs <= n_confs <= max_confs): if min_confs == max_confs: wrong_confs_msg += f" exactly {min_confs}." else: wrong_confs_msg += f" between {min_confs} and {max_confs}." 
else: return # If there's only a max constraint, check that elif min_confs is not None and max_confs is None: if not (min_confs <= n_confs): wrong_confs_msg += f" at least {min_confs}." else: return # If there's only a maximum constraint, check that elif min_confs is None and max_confs is not None: if not (n_confs <= max_confs): wrong_confs_msg += f" at most {max_confs}." else: return # If we've made it this far, the molecule has the wrong number of conformers if strict_n_conformers: wrong_confs_msg += exception_suffix raise IncorrectNumConformersError(wrong_confs_msg) else: warnings.warn(wrong_confs_msg, IncorrectNumConformersWarning) def __repr__(self): return ( f"ToolkitWrapper around {self.toolkit_name} version {self.toolkit_version}" ) @inherit_docstrings class BuiltInToolkitWrapper(ToolkitWrapper): """ Built-in ToolkitWrapper for very basic functionality. This is intended for use in testing and not much more. .. warning :: This API is experimental and subject to change. """ _toolkit_name = "Built-in Toolkit" _toolkit_installation_instructions = ( "This toolkit is installed with the Open Force Field Toolkit and does " "not require additional dependencies." ) def __init__(self): super().__init__() self._toolkit_file_read_formats = [] self._toolkit_file_write_formats = [] def assign_partial_charges( self, molecule, partial_charge_method=None, use_conformers=None, strict_n_conformers=False, _cls=None, ): """ Compute partial charges with the built-in toolkit using simple arithmetic operations, and assign the new values to the partial_charges attribute. .. warning :: This API is experimental and subject to change. Parameters ---------- molecule : openff.toolkit.topology.Molecule Molecule for which partial charges are to be computed partial_charge_method: str, optional, default=None The charge model to use. One of ['zeros', 'formal_charge']. If None, 'formal_charge' will be used. 
use_conformers : iterable of simtk.unit.Quantity-wrapped numpy arrays, each with shape (n_atoms, 3) and dimension of distance. Optional, default = None Coordinates to use for partial charge calculation. If None, an appropriate number of conformers will be generated. strict_n_conformers : bool, default=False Whether to raise an exception if an invalid number of conformers is provided for the given charge method. If this is False and an invalid number of conformers is found, a warning will be raised instead of an Exception. _cls : class Molecule constructor Raises ------ ChargeMethodUnavailableError if the requested charge method can not be handled by this toolkit IncorrectNumConformersError if strict_n_conformers is True and use_conformers is provided and specifies an invalid number of conformers for the requested method ChargeCalculationError if the charge calculation is supported by this toolkit, but fails """ PARTIAL_CHARGE_METHODS = { "zeros": {"rec_confs": 0, "min_confs": 0, "max_confs": 0}, "formal_charge": {"rec_confs": 0, "min_confs": 0, "max_confs": 0}, } if partial_charge_method is None: partial_charge_method = "formal_charge" if _cls is None: from openff.toolkit.topology.molecule import Molecule _cls = Molecule # Make a temporary copy of the molecule, since we'll be messing with its conformers mol_copy = _cls(molecule) partial_charge_method = partial_charge_method.lower() if partial_charge_method not in PARTIAL_CHARGE_METHODS: raise ChargeMethodUnavailableError( f'Partial charge method "{partial_charge_method}"" is not supported by ' f"the Built-in toolkit. 
Available charge methods are " f"{list(PARTIAL_CHARGE_METHODS.keys())}" ) if use_conformers is None: # Note that this refers back to the GLOBAL_TOOLKIT_REGISTRY by default, since # BuiltInToolkitWrapper can't generate conformers mol_copy.generate_conformers( n_conformers=PARTIAL_CHARGE_METHODS[partial_charge_method]["rec_confs"] ) else: mol_copy._conformers = None for conformer in use_conformers: mol_copy._add_conformer(conformer) self._check_n_conformers( mol_copy, partial_charge_method=partial_charge_method, min_confs=0, max_confs=0, strict_n_conformers=strict_n_conformers, ) partial_charges = unit.Quantity( np.zeros((molecule.n_particles)), unit.elementary_charge ) if partial_charge_method == "zeroes": pass elif partial_charge_method == "formal_charge": for part_idx, particle in enumerate(molecule.particles): partial_charges[part_idx] = particle.formal_charge molecule.partial_charges = partial_charges @inherit_docstrings class OpenEyeToolkitWrapper(ToolkitWrapper): """ OpenEye toolkit wrapper .. warning :: This API is experimental and subject to change. 
    """

    _toolkit_name = "OpenEye Toolkit"
    _toolkit_installation_instructions = (
        "The OpenEye toolkit requires a (free for academics) license, and can be "
        "found at: "
        "https://docs.eyesopen.com/toolkits/python/quickstart-python/install.html"
    )
    # This could belong to ToolkitWrapper, although it seems strange
    # to carry that data for open-source toolkits
    _is_licensed = None
    # Only for OpenEye is there potentially a difference between
    # being available and installed
    _is_installed = None
    # Maps each OpenEye submodule to the function that reports its license state.
    _license_functions = {
        "oechem": "OEChemIsLicensed",
        "oequacpac": "OEQuacPacIsLicensed",
        "oeiupac": "OEIUPACIsLicensed",
        "oeomega": "OEOmegaIsLicensed",
    }

    def __init__(self):
        # NOTE(review): "XYC" below looks like it might be a typo for "XYZ" —
        # confirm against the OEFormat_* constants before changing.
        self._toolkit_file_read_formats = [
            "CAN", "CDX", "CSV", "FASTA", "INCHI", "INCHIKEY", "ISM", "MDL",
            "MF", "MMOD", "MOL2", "MOL2H", "MOPAC", "OEB", "PDB", "RDF",
            "SDF", "SKC", "SLN", "SMI", "USM", "XYC",
        ]
        self._toolkit_file_write_formats = [
            "CAN", "CDX", "CSV", "FASTA", "INCHI", "INCHIKEY", "ISM", "MDL",
            "MF", "MMOD", "MOL2", "MOL2H", "MOPAC", "OEB", "PDB", "RDF",
            "SDF", "SKC", "SLN", "SMI", "USM", "XYC",
        ]

        # check if the toolkit can be loaded
        if not self.is_available():
            msg = (
                f"The required toolkit {self._toolkit_name} is not "
                f"available. {self._toolkit_installation_instructions}"
            )
            if self._is_installed is False:
                raise ToolkitUnavailableException(msg)
            if self._is_licensed is False:
                raise LicenseError(msg)

        from openeye import __version__ as openeye_version

        self._toolkit_version = openeye_version

    @classmethod
    def _check_licenses(cls):
        """Check license of all known OpenEye tools. Returns True if any are found
        to be licensed, False if any are not."""
        for (tool, license_func) in cls._license_functions.items():
            try:
                module = importlib.import_module("openeye." + tool)
            except (ImportError, ModuleNotFoundError):
                continue
            else:
                # Call e.g. oechem.OEChemIsLicensed(); any licensed tool suffices.
                if getattr(module, license_func)():
                    return True
        return False

    @classmethod
    def is_available(cls):
        """
        Check if the given OpenEye toolkit components are available.

        If the OpenEye toolkit is not installed or no license is found for at least
        one the required toolkits , ``False`` is returned.

        Returns
        -------
        all_installed : bool
            ``True`` if all required OpenEye tools are installed and licensed,
            ``False`` otherwise
        """
        # Both checks are cached on the class after the first call.
        if cls._is_available is None:
            if cls._is_licensed is None:
                cls._is_licensed = cls._check_licenses()
            if cls._is_installed is None:
                for tool in cls._license_functions.keys():
                    cls._is_installed = True
                    try:
                        importlib.import_module("openeye." + tool)
                    except (ImportError, ModuleNotFoundError):
                        cls._is_installed = False
            cls._is_available = cls._is_installed and cls._is_licensed
        return cls._is_available

    def from_object(self, obj, allow_undefined_stereo=False, _cls=None):
        """
        If given an OEMol (or OEMol-derived object), this function will load it into an
        openff.toolkit.topology.molecule

        Parameters
        ----------
        obj : A molecule-like object
            An object to by type-checked.
        allow_undefined_stereo : bool, default=False
            Whether to accept molecules with undefined stereocenters. If False, an
            exception will be raised if a molecule with undefined stereochemistry is
            passed into this function.
        _cls : class
            Molecule constructor

        Returns
        -------
        Molecule
            An openff.toolkit.topology.molecule Molecule.

        Raises
        ------
        NotImplementedError
            If the object could not be converted into a Molecule.
        """
        # TODO: Add tests for the from_object functions
        from openeye import oechem

        if _cls is None:
            from openff.toolkit.topology.molecule import Molecule

            _cls = Molecule
        if isinstance(obj, oechem.OEMolBase):
            return self.from_openeye(
                oemol=obj, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls
            )
        raise NotImplementedError(
            "Cannot create Molecule from {} object".format(type(obj))
        )

    def from_file(
        self, file_path, file_format, allow_undefined_stereo=False, _cls=None
    ):
        """
        Return an openff.toolkit.topology.Molecule from a file using this toolkit.

        Parameters
        ----------
        file_path : str
            The file to read the molecule from
        file_format : str
            Format specifier, usually file suffix (eg. 'MOL2', 'SMI')
            Note that not all toolkits support all formats. Check
            ToolkitWrapper.toolkit_file_read_formats for details.
        allow_undefined_stereo : bool, default=False
            If false, raises an exception if oemol contains undefined stereochemistry.
        _cls : class
            Molecule constructor

        Returns
        -------
        molecules : List[Molecule]
            The list of ``Molecule`` objects in the file.

        Raises
        ------
        GAFFAtomTypeWarning
            If the loaded mol2 file possibly uses GAFF atom types, which
            are not supported.

        Examples
        --------

        Load a mol2 file into an OpenFF ``Molecule`` object.

        >>> from openff.toolkit.utils import get_data_file_path
        >>> mol2_file_path = get_data_file_path('molecules/cyclohexane.mol2')
        >>> toolkit = OpenEyeToolkitWrapper()
        >>> molecule = toolkit.from_file(mol2_file_path, file_format='mol2')
        """
        from openeye import oechem

        # NOTE(review): file_format is unused here — presumably oemolistream
        # infers the format from the file suffix; confirm against OE docs.
        ifs = oechem.oemolistream(file_path)
        return self._read_oemolistream_molecules(
            ifs, allow_undefined_stereo, file_path=file_path, _cls=_cls
        )

    def from_file_obj(
        self, file_obj, file_format, allow_undefined_stereo=False, _cls=None
    ):
        """
        Return an openff.toolkit.topology.Molecule from a file-like object (an object with
        a ".read()" method using this toolkit.

        Parameters
        ----------
        file_obj : file-like object
            The file-like object to read the molecule from
        file_format : str
            Format specifier, usually file suffix (eg. 'MOL2', 'SMI')
            Note that not all toolkits support all formats. Check
            ToolkitWrapper.toolkit_file_read_formats for details.
        allow_undefined_stereo : bool, default=False
            If false, raises an exception if oemol contains undefined stereochemistry.
        _cls : class
            Molecule constructor

        Returns
        -------
        molecules : List[Molecule]
            The list of Molecule objects in the file object.

        Raises
        ------
        GAFFAtomTypeWarning
            If the loaded mol2 file possibly uses GAFF atom types, which
            are not supported.
        """
        from openeye import oechem

        # Configure input molecule stream.
        ifs = oechem.oemolistream()
        ifs.openstring(file_obj.read())
        oeformat = getattr(oechem, "OEFormat_" + file_format)
        ifs.SetFormat(oeformat)

        return self._read_oemolistream_molecules(ifs, allow_undefined_stereo, _cls=_cls)

    def to_file_obj(self, molecule, file_obj, file_format):
        """
        Writes an OpenFF Molecule to a file-like object

        Parameters
        ----------
        molecule : an OpenFF Molecule
            The molecule to write
        file_obj
            The file-like object to write to
        file_format
            The format for writing the molecule data
        """
        # Round-trips through a real temp file because the OpenEye writer
        # works on paths, not file objects.
        with tempfile.TemporaryDirectory() as tmpdir:
            with temporary_cd(tmpdir):
                outfile = "temp_molecule." + file_format
                self.to_file(molecule, outfile, file_format)
                file_data = open(outfile).read()
        file_obj.write(file_data)

    def to_file(self, molecule, file_path, file_format):
        """
        Writes an OpenFF Molecule to a file-like object

        Parameters
        ----------
        molecule : an OpenFF Molecule
            The molecule to write
        file_path
            The file path to write to.
        file_format
            The format for writing the molecule data
        """
        from openeye import oechem

        oemol = self.to_openeye(molecule)
        ofs = oechem.oemolostream(file_path)
        openeye_format = getattr(oechem, "OEFormat_" + file_format.upper())
        ofs.SetFormat(openeye_format)

        # OFFTK strictly treats SDF as a single-conformer format.
        # We need to override OETK's behavior here if the user is saving a multiconformer molecule.
# Remove all but the first conformer when writing to SDF as we only support single conformer format if (file_format.lower() == "sdf") and oemol.NumConfs() > 1: conf1 = [conf for conf in oemol.GetConfs()][0] flat_coords = list() for idx, coord in conf1.GetCoords().items(): flat_coords.extend(coord) oemol.DeleteConfs() oecoords = oechem.OEFloatArray(flat_coords) oemol.NewConf(oecoords) # We're standardizing on putting partial charges into SDFs under the `atom.dprop.PartialCharge` property if (file_format.lower() == "sdf") and (molecule.partial_charges is not None): partial_charges_list = [ oeatom.GetPartialCharge() for oeatom in oemol.GetAtoms() ] partial_charges_str = " ".join([f"{val:f}" for val in partial_charges_list]) # TODO: "dprop" means "double precision" -- Is there any way to make Python more accurately # describe/infer the proper data type? oechem.OESetSDData(oemol, "atom.dprop.PartialCharge", partial_charges_str) # If the file format is "pdb" using OEWriteMolecule() rearranges the atoms (hydrogens are pushed to the bottom) # Issue #475 (https://github.com/openforcefield/openff-toolkit/issues/475) # dfhahn's workaround: Using OEWritePDBFile does not alter the atom arrangement if file_format.lower() == "pdb": if oemol.NumConfs() > 1: for conf in oemol.GetConfs(): oechem.OEWritePDBFile(ofs, conf, oechem.OEOFlavor_PDB_BONDS) else: oechem.OEWritePDBFile(ofs, oemol, oechem.OEOFlavor_PDB_BONDS) else: oechem.OEWriteMolecule(ofs, oemol) ofs.close() @staticmethod def _turn_oemolbase_sd_charges_into_partial_charges(oemol): """ Process an OEMolBase object and check to see whether it has an SD data pair where the tag is "atom.dprop.PartialCharge", indicating that it has a list of atomic partial charges. If so, apply those charges to the OEAtoms in the OEMolBase, and delete the SD data pair. Parameters ---------- oemol : openeye.oechem.OEMolBase The molecule to process Returns ------- charges_are_present : bool Whether charges are present in the SD file. 
This is necessary because OEAtoms have a default partial charge of 0.0, which makes truly zero-charge molecules (eg "N2", "Ar"...) indistinguishable from molecules for which partial charges have not been assigned. The OFF Toolkit allows this distinction with mol.partial_charges=None. In order to complete roundtrips within the OFFMol spec, we must interpret the presence or absence of this tag as a proxy for mol.partial_charges=None. """ from openeye import oechem for dp in oechem.OEGetSDDataPairs(oemol): if dp.GetTag() == "atom.dprop.PartialCharge": charges_str = oechem.OEGetSDData(oemol, "atom.dprop.PartialCharge") charges_unitless = [float(i) for i in charges_str.split()] assert len(charges_unitless) == oemol.NumAtoms() for charge, oeatom in zip(charges_unitless, oemol.GetAtoms()): oeatom.SetPartialCharge(charge) oechem.OEDeleteSDData(oemol, "atom.dprop.PartialCharge") return True return False def _read_oemolistream_molecules( self, oemolistream, allow_undefined_stereo, file_path=None, _cls=None ): """ Reads and return the Molecules in a OEMol input stream. Parameters ---------- oemolistream : oechem.oemolistream The OEMol input stream to read from. allow_undefined_stereo : bool If false, raises an exception if oemol contains undefined stereochemistry. file_path : str, optional The path to the mol2 file. This is used exclusively to make the error message more meaningful when the mol2 files doesn't use Tripos atom types. _cls : class Molecule constructor Returns ------- molecules : List[Molecule] The list of Molecule objects in the stream. 
""" from openeye import oechem mols = list() oemol = oechem.OEMol() while oechem.OEReadMolecule(oemolistream, oemol): oechem.OEPerceiveChiral(oemol) oechem.OEAssignAromaticFlags(oemol, oechem.OEAroModel_MDL) oechem.OE3DToInternalStereo(oemol) # If this is either a multi-conformer or multi-molecule SD file, check to see if there are partial charges if (oemolistream.GetFormat() == oechem.OEFormat_SDF) and hasattr( oemol, "GetConfs" ): # The openFF toolkit treats each conformer in a "multiconformer" SDF as # a separate molecule. # https://github.com/openforcefield/openff-toolkit/issues/202 # Note that there is ambiguity about how SD data and "multiconformer" SD files should be stored. # As a result, we have to do some weird stuff below, as discussed in # https://docs.eyesopen.com/toolkits/python/oechemtk/oemol.html#dude-where-s-my-sd-data # Jeff: I was unable to find a way to distinguish whether a SDF was multiconformer or not. # The logic below should handle either single- or multi-conformer SDFs. for conf in oemol.GetConfIter(): # First, we turn "conf" into an OEMCMol (OE multiconformer mol), since OTHER file formats # really are multiconformer, and we will eventually feed this into the `from_openeye` function, # which is made to ingest multiconformer mols. this_conf_oemcmol = conf.GetMCMol() # Then, we take any SD data pairs that were on the oemol, and copy them on to "this_conf_oemcmol". # These SD pairs will be populated if we're dealing with a single-conformer SDF. for dp in oechem.OEGetSDDataPairs(oemol): oechem.OESetSDData( this_conf_oemcmol, dp.GetTag(), dp.GetValue() ) # On the other hand, these SD pairs will be populated if we're dealing with a MULTI-conformer SDF. 
for dp in oechem.OEGetSDDataPairs(conf): oechem.OESetSDData( this_conf_oemcmol, dp.GetTag(), dp.GetValue() ) # This function fishes out the special SD data tag we use for partial charge # ("atom.dprop.PartialCharge"), and applies those as OETK-supported partial charges on the OEAtoms has_charges = self._turn_oemolbase_sd_charges_into_partial_charges( this_conf_oemcmol ) # Finally, we feed the molecule into `from_openeye`, where it converted into an OFFMol mol = self.from_openeye( this_conf_oemcmol, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls, ) # If the molecule didn't even have the `PartialCharges` tag, we set it from zeroes to None here. if not (has_charges): mol.partial_charges = None mols.append(mol) else: # In case this is being read from a SINGLE-molecule SD file, convert the SD field where we # stash partial charges into actual per-atom partial charges self._turn_oemolbase_sd_charges_into_partial_charges(oemol) mol = self.from_openeye( oemol, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls ) mols.append(mol) # Check if this is an AMBER-produced mol2 file, which we can not load because they use GAFF atom types. if oemolistream.GetFormat() == oechem.OEFormat_MOL2: self._check_mol2_gaff_atom_type(mol, file_path) return mols def enumerate_protomers(self, molecule, max_states=10): """ Enumerate the formal charges of a molecule to generate different protomoers. Parameters ---------- molecule: openff.toolkit.topology.Molecule The molecule whose state we should enumerate max_states: int optional, default=10, The maximum number of protomer states to be returned. Returns ------- molecules: List[openff.toolkit.topology.Molecule], A list of the protomers of the input molecules not including the input. 
""" from openeye import oequacpac options = oequacpac.OEFormalChargeOptions() # add one as the input is included options.SetMaxCount(max_states + 1) molecules = [] oemol = self.to_openeye(molecule=molecule) for protomer in oequacpac.OEEnumerateFormalCharges(oemol, options): mol = self.from_openeye( protomer, allow_undefined_stereo=True, _cls=molecule.__class__ ) if mol != molecule: molecules.append(mol) return molecules def enumerate_stereoisomers( self, molecule, undefined_only=False, max_isomers=20, rationalise=True ): """ Enumerate the stereocenters and bonds of the current molecule. Parameters ---------- molecule: openff.toolkit.topology.Molecule The molecule whose state we should enumerate undefined_only: bool optional, default=False If we should enumerate all stereocenters and bonds or only those with undefined stereochemistry max_isomers: int optional, default=20 The maximum amount of molecules that should be returned rationalise: bool optional, default=True If we should try to build and rationalise the molecule to ensure it can exist Returns -------- molecules: List[openff.toolkit.topology.Molecule] A list of openff.toolkit.topology.Molecule instances """ from openeye import oechem, oeomega oemol = self.to_openeye(molecule=molecule) # arguments for this function can be found here # <https://docs.eyesopen.com/toolkits/python/omegatk/OEConfGenFunctions/OEFlipper.html?highlight=stereoisomers> molecules = [] for isomer in oeomega.OEFlipper(oemol, 200, not undefined_only, True, False): if rationalise: # try and determine if the molecule is reasonable by generating a conformer with # strict stereo, like embedding in rdkit omega = oeomega.OEOmega() omega.SetMaxConfs(1) omega.SetCanonOrder(False) # Don't generate random stereoisomer if not specified omega.SetStrictStereo(True) mol = oechem.OEMol(isomer) status = omega(mol) if status: isomol = self.from_openeye(mol, _cls=molecule.__class__) if isomol != molecule: molecules.append(isomol) else: isomol = 
self.from_openeye(isomer, _cls=molecule.__class__)
                if isomol != molecule:
                    molecules.append(isomol)

        return molecules[:max_isomers]

    def enumerate_tautomers(self, molecule, max_states=20):
        """
        Enumerate the possible tautomers of the current molecule

        Parameters
        ----------
        molecule: openff.toolkit.topology.Molecule
            The molecule whose state we should enumerate

        max_states: int optional, default=20
            The maximum amount of molecules that should be returned

        Returns
        -------
        molecules: List[openff.toolkit.topology.Molecule]
            A list of openff.toolkit.topology.Molecule instances excluding the input molecule.
        """
        from openeye import oequacpac

        oemol = self.to_openeye(molecule=molecule)

        tautomers = []

        # set the options
        tautomer_options = oequacpac.OETautomerOptions()
        tautomer_options.SetApplyWarts(False)
        # Request one extra state because the input molecule itself is included
        # in the enumeration and filtered out below.
        tautomer_options.SetMaxTautomersGenerated(max_states + 1)
        tautomer_options.SetSaveStereo(True)
        # this aligns the outputs of rdkit and openeye for the example cases
        tautomer_options.SetCarbonHybridization(False)

        for tautomer in oequacpac.OEEnumerateTautomers(oemol, tautomer_options):
            # remove the input tautomer from the output
            taut = self.from_openeye(
                tautomer, allow_undefined_stereo=True, _cls=molecule.__class__
            )
            if taut != molecule:
                # NOTE(review): this converts `tautomer` through from_openeye a
                # second time; appending the already-converted `taut` would
                # avoid the duplicate work — confirm and simplify.
                tautomers.append(
                    self.from_openeye(
                        tautomer, allow_undefined_stereo=True, _cls=molecule.__class__
                    )
                )

        return tautomers

    @staticmethod
    def _check_mol2_gaff_atom_type(molecule, file_path=None):
        """Attempts to detect the presence of GAFF atom types in a molecule loaded from a mol2 file.

        For now, this raises a ``GAFFAtomTypeWarning`` if the molecule
        includes Osmium and Holmium atoms, which have GAFF types OS and
        HO respectively.

        Parameters
        ----------
        molecule : openff.toolkit.topology.molecule.Molecule
            The loaded molecule.
        file_path : str, optional
            The path to the mol2 file. This is used exclusively to make
            the error message more meaningful.

        """
        # Handle default.
if file_path is None: file_path = "" else: # Append a ':' character that will separate the file # path from the molecule string representation. file_path = file_path + ":" # atomic_number: (GAFF_type, element_name) warning_atomic_numbers = {76: ("OS", "Osmium"), 67: ("HO", "Holmium")} for atom in molecule.atoms: try: atom_type, element_name = warning_atomic_numbers[atom.atomic_number] except KeyError: pass else: import warnings warn_msg = ( f'OpenEye interpreted the type "{atom_type}" in {file_path}{molecule.name}' f" as {element_name}. Does your mol2 file uses Tripos SYBYL atom types?" " Other atom types such as GAFF are not supported." ) warnings.warn(warn_msg, GAFFAtomTypeWarning) @staticmethod def _openeye_cip_atom_stereochemistry(oemol, oeatom): """ Determine CIP stereochemistry (R/S) for the specified atom Parameters ---------- oemol : openeye.oechem.OEMolBase The molecule of interest oeatom : openeye.oechem.OEAtomBase The atom whose stereochemistry is to be computed Returns ------- stereochemistry : str 'R', 'S', or None if no stereochemistry is specified or the atom is not a stereocenter """ from openeye import oechem if not oeatom.HasStereoSpecified(): # No stereochemical information has been stored, so this could be unknown stereochemistry # TODO: Should we raise an exception? return None cip = oechem.OEPerceiveCIPStereo(oemol, oeatom) if cip == oechem.OECIPAtomStereo_S: return "S" elif cip == oechem.OECIPAtomStereo_R: return "R" elif cip == oechem.OECIPAtomStereo_NotStereo: # Not a stereocenter # TODO: Should this be a different case from ``None``? 
return None @staticmethod def _openeye_cip_bond_stereochemistry(oemol, oebond): """ Determine CIP stereochemistry (E/Z) for the specified bond Parameters ---------- oemol : openeye.oechem.OEMolBase The molecule of interest oebond : openeye.oechem.OEBondBase The bond whose stereochemistry is to be computed Returns ------- stereochemistry : str 'E', 'Z', or None if stereochemistry is unspecified or the bond is not a stereo bond """ from openeye import oechem if not oebond.HasStereoSpecified(): # No stereochemical information has been stored, so this could be unknown stereochemistry # TODO: Should we raise an exception? return None cip = oechem.OEPerceiveCIPStereo(oemol, oebond) if cip == oechem.OECIPBondStereo_E: return "E" elif cip == oechem.OECIPBondStereo_Z: return "Z" elif cip == oechem.OECIPBondStereo_NotStereo: return None @staticmethod def from_openeye(oemol, allow_undefined_stereo=False, _cls=None): """ Create a Molecule from an OpenEye molecule. If the OpenEye molecule has implicit hydrogens, this function will make them explicit. ``OEAtom`` s have a different set of allowed value for partial charges than ``openff.toolkit.topology.Molecule`` s. In the OpenEye toolkits, partial charges are stored on individual ``OEAtom`` s, and their values are initialized to ``0.0``. In the Open Force Field Toolkit, an ``openff.toolkit.topology.Molecule``'s ``partial_charges`` attribute is initialized to ``None`` and can be set to a ``simtk.unit.Quantity``-wrapped numpy array with units of elementary charge. The Open Force Field Toolkit considers an ``OEMol`` where every ``OEAtom`` has a partial charge of ``float('nan')`` to be equivalent to an Open Force Field Toolkit `Molecule`'s ``partial_charges = None``. This assumption is made in both ``to_openeye`` and ``from_openeye``. .. warning :: This API is experimental and subject to change. 
Parameters ---------- oemol : openeye.oechem.OEMol An OpenEye molecule allow_undefined_stereo : bool, default=False If false, raises an exception if oemol contains undefined stereochemistry. _cls : class Molecule constructor Returns ------- molecule : openff.toolkit.topology.Molecule An OpenFF molecule Examples -------- Create a Molecule from an OpenEye OEMol >>> from openeye import oechem >>> from openff.toolkit.tests.utils import get_data_file_path >>> ifs = oechem.oemolistream(get_data_file_path('systems/monomers/ethanol.mol2')) >>> oemols = list(ifs.GetOEGraphMols()) >>> toolkit_wrapper = OpenEyeToolkitWrapper() >>> molecule = toolkit_wrapper.from_openeye(oemols[0]) """ import math from openeye import oechem oemol = oechem.OEMol(oemol) # Add explicit hydrogens if they're implicit if oechem.OEHasImplicitHydrogens(oemol): oechem.OEAddExplicitHydrogens(oemol) # TODO: Is there any risk to perceiving aromaticity here instead of later? oechem.OEAssignAromaticFlags(oemol, oechem.OEAroModel_MDL) oechem.OEPerceiveChiral(oemol) # Check that all stereo is specified # Potentially better OE stereo check: OEFlipper — Toolkits - - Python # https: // docs.eyesopen.com / toolkits / python / omegatk / OEConfGenFunctions / OEFlipper.html unspec_chiral = False unspec_db = False problematic_atoms = list() problematic_bonds = list() for oeatom in oemol.GetAtoms(): if oeatom.IsChiral(): if not (oeatom.HasStereoSpecified()): unspec_chiral = True problematic_atoms.append(oeatom) for oebond in oemol.GetBonds(): if oebond.IsChiral(): if not (oebond.HasStereoSpecified()): unspec_db = True problematic_bonds.append(oebond) if unspec_chiral or unspec_db: def oeatom_to_str(oeatom): return "atomic num: {}, name: {}, idx: {}, aromatic: {}, chiral: {}".format( oeatom.GetAtomicNum(), oeatom.GetName(), oeatom.GetIdx(), oeatom.IsAromatic(), oeatom.IsChiral(), ) def oebond_to_str(oebond): return "order: {}, chiral: {}".format( oebond.GetOrder(), oebond.IsChiral() ) def describe_oeatom(oeatom): 
description = "Atom {} with bonds:".format(oeatom_to_str(oeatom)) for oebond in oeatom.GetBonds(): description += "\nbond {} to atom {}".format( oebond_to_str(oebond), oeatom_to_str(oebond.GetNbr(oeatom)) ) return description msg = ( "OEMol has unspecified stereochemistry. " "oemol.GetTitle(): {}\n".format(oemol.GetTitle()) ) if len(problematic_atoms) != 0: msg += "Problematic atoms are:\n" for problematic_atom in problematic_atoms: msg += describe_oeatom(problematic_atom) + "\n" if len(problematic_bonds) != 0: msg += "Problematic bonds are: {}\n".format(problematic_bonds) if allow_undefined_stereo: msg = "Warning (not error because allow_undefined_stereo=True): " + msg logger.warning(msg) else: msg = "Unable to make OFFMol from OEMol: " + msg raise UndefinedStereochemistryError(msg) if _cls is None: from openff.toolkit.topology.molecule import Molecule _cls = Molecule molecule = _cls() molecule.name = oemol.GetTitle() # Copy any attached SD tag information for dp in oechem.OEGetSDDataPairs(oemol): molecule._properties[dp.GetTag()] = dp.GetValue() map_atoms = dict() # {oemol_idx: molecule_idx} atom_mapping = {} for oeatom in oemol.GetAtoms(): oe_idx = oeatom.GetIdx() map_id = oeatom.GetMapIdx() atomic_number = oeatom.GetAtomicNum() formal_charge = oeatom.GetFormalCharge() * unit.elementary_charge is_aromatic = oeatom.IsAromatic() stereochemistry = OpenEyeToolkitWrapper._openeye_cip_atom_stereochemistry( oemol, oeatom ) # stereochemistry = self._openeye_cip_atom_stereochemistry(oemol, oeatom) name = "" if oeatom.HasData("name"): name = oeatom.GetData("name") atom_index = molecule._add_atom( atomic_number, formal_charge, is_aromatic, stereochemistry=stereochemistry, name=name, ) map_atoms[ oe_idx ] = atom_index # store for mapping oeatom to molecule atom indices below atom_mapping[atom_index] = map_id # If we have a full / partial atom map add it to the molecule. 
Zeroes 0 # indicates no mapping if {*atom_mapping.values()} != {0}: molecule._properties["atom_map"] = { idx: map_idx for idx, map_idx in atom_mapping.items() if map_idx != 0 } for oebond in oemol.GetBonds(): atom1_index = map_atoms[oebond.GetBgnIdx()] atom2_index = map_atoms[oebond.GetEndIdx()] bond_order = oebond.GetOrder() is_aromatic = oebond.IsAromatic() stereochemistry = OpenEyeToolkitWrapper._openeye_cip_bond_stereochemistry( oemol, oebond ) if oebond.HasData("fractional_bond_order"): fractional_bond_order = oebond.GetData("fractional_bond_order") else: fractional_bond_order = None molecule._add_bond( atom1_index, atom2_index, bond_order, is_aromatic=is_aromatic, stereochemistry=stereochemistry, fractional_bond_order=fractional_bond_order, ) # TODO: Copy conformations, if present # TODO: Come up with some scheme to know when to import coordinates # From SMILES: no # From MOL2: maybe # From other: maybe if hasattr(oemol, "GetConfs"): for conf in oemol.GetConfs(): n_atoms = molecule.n_atoms positions = unit.Quantity( np.zeros(shape=[n_atoms, 3], dtype=np.float64), unit.angstrom ) for oe_id in conf.GetCoords().keys(): off_atom_coords = unit.Quantity( conf.GetCoords()[oe_id], unit.angstrom ) off_atom_index = map_atoms[oe_id] positions[off_atom_index, :] = off_atom_coords if (positions == 0 * unit.angstrom).all() and n_atoms > 1: continue molecule._add_conformer(positions) # Copy partial charges, if present partial_charges = unit.Quantity( np.zeros(shape=molecule.n_atoms, dtype=np.float64), unit=unit.elementary_charge, ) # If all OEAtoms have a partial charge of NaN, then the OFFMol should # have its partial_charges attribute set to None any_partial_charge_is_not_nan = False for oe_atom in oemol.GetAtoms(): oe_idx = oe_atom.GetIdx() off_idx = map_atoms[oe_idx] unitless_charge = oe_atom.GetPartialCharge() if not math.isnan(unitless_charge): any_partial_charge_is_not_nan = True # break charge = unitless_charge * unit.elementary_charge partial_charges[off_idx] = 
charge if any_partial_charge_is_not_nan: molecule.partial_charges = partial_charges else: molecule.partial_charges = None return molecule @staticmethod def to_openeye(molecule, aromaticity_model=DEFAULT_AROMATICITY_MODEL): """ Create an OpenEye molecule using the specified aromaticity model ``OEAtom`` s have a different set of allowed value for partial charges than ``openff.toolkit.topology.Molecule``\ s. In the OpenEye toolkits, partial charges are stored on individual ``OEAtom``\ s, and their values are initialized to ``0.0``. In the Open Force Field Toolkit, an``openff.toolkit.topology.Molecule``'s ``partial_charges`` attribute is initialized to ``None`` and can be set to a ``simtk.unit.Quantity``-wrapped numpy array with units of elementary charge. The Open Force Field Toolkit considers an ``OEMol`` where every ``OEAtom`` has a partial charge of ``float('nan')`` to be equivalent to an Open Force Field Toolkit ``Molecule``'s ``partial_charges = None``. This assumption is made in both ``to_openeye`` and ``from_openeye``. .. todo :: * Should the aromaticity model be specified in some other way? .. warning :: This API is experimental and subject to change. Parameters ---------- molecule : openff.toolkit.topology.molecule.Molecule object The molecule to convert to an OEMol aromaticity_model : str, optional, default=DEFAULT_AROMATICITY_MODEL The aromaticity model to use Returns ------- oemol : openeye.oechem.OEMol An OpenEye molecule Examples -------- Create an OpenEye molecule from a Molecule >>> from openff.toolkit.topology import Molecule >>> toolkit_wrapper = OpenEyeToolkitWrapper() >>> molecule = Molecule.from_smiles('CC') >>> oemol = toolkit_wrapper.to_openeye(molecule) """ from openeye import oechem if hasattr(oechem, aromaticity_model): oe_aro_model = getattr(oechem, aromaticity_model) else: raise ValueError( "Error: provided aromaticity model not recognized by oechem." 
) oemol = oechem.OEMol() # if not(molecule.name is None): oemol.SetTitle(molecule.name) map_atoms = {} # {off_idx : oe_idx} # Add atoms oemol_atoms = list() # list of corresponding oemol atoms for atom in molecule.atoms: oeatom = oemol.NewAtom(atom.atomic_number) oeatom.SetFormalCharge( atom.formal_charge.value_in_unit(unit.elementary_charge) ) # simtk.unit.Quantity(1, unit.elementary_charge) # TODO: Do we want to provide _any_ pathway for Atom.is_aromatic to influence the OEMol? # oeatom.SetAromatic(atom.is_aromatic) oeatom.SetData("name", atom.name) oeatom.SetPartialCharge(float("nan")) oemol_atoms.append(oeatom) map_atoms[atom.molecule_atom_index] = oeatom.GetIdx() # Add bonds oemol_bonds = list() # list of corresponding oemol bonds for bond in molecule.bonds: # atom1_index = molecule.atoms.index(bond.atom1) # atom2_index = molecule.atoms.index(bond.atom2) atom1_index = bond.atom1_index atom2_index = bond.atom2_index oebond = oemol.NewBond(oemol_atoms[atom1_index], oemol_atoms[atom2_index]) oebond.SetOrder(bond.bond_order) # TODO: Do we want to provide _any_ pathway for Bond.is_aromatic to influence the OEMol? 
# oebond.SetAromatic(bond.is_aromatic) if not (bond.fractional_bond_order is None): oebond.SetData("fractional_bond_order", bond.fractional_bond_order) oemol_bonds.append(oebond) oechem.OEAssignAromaticFlags(oemol, oe_aro_model) # Set atom stereochemistry now that all connectivity is in place for atom, oeatom in zip(molecule.atoms, oemol_atoms): if not atom.stereochemistry: continue # Set arbitrary initial stereochemistry neighs = [n for n in oeatom.GetAtoms()] oeatom.SetStereo( neighs, oechem.OEAtomStereo_Tetra, oechem.OEAtomStereo_Right ) # Flip chirality if stereochemistry isincorrect oeatom_stereochemistry = ( OpenEyeToolkitWrapper._openeye_cip_atom_stereochemistry(oemol, oeatom) ) if oeatom_stereochemistry != atom.stereochemistry: # Flip the stereochemistry oeatom.SetStereo( neighs, oechem.OEAtomStereo_Tetra, oechem.OEAtomStereo_Left ) # Verify it matches now as a sanity check oeatom_stereochemistry = ( OpenEyeToolkitWrapper._openeye_cip_atom_stereochemistry( oemol, oeatom ) ) if oeatom_stereochemistry != atom.stereochemistry: raise Exception( "Programming error: OpenEye atom stereochemistry assumptions failed." 
) # Set bond stereochemistry for bond, oebond in zip(molecule.bonds, oemol_bonds): if not bond.stereochemistry: continue atom1_index = bond.molecule.atoms.index(bond.atom1) atom2_index = bond.molecule.atoms.index(bond.atom2) # Set arbitrary initial stereochemistry oeatom1, oeatom2 = oemol_atoms[atom1_index], oemol_atoms[atom2_index] oeatom1_neighbor = [n for n in oeatom1.GetAtoms() if not n == oeatom2][0] oeatom2_neighbor = [n for n in oeatom2.GetAtoms() if not n == oeatom1][0] # oebond.SetStereo([oeatom1, oeatom2], oechem.OEBondStereo_CisTrans, oechem.OEBondStereo_Cis) oebond.SetStereo( [oeatom1_neighbor, oeatom2_neighbor], oechem.OEBondStereo_CisTrans, oechem.OEBondStereo_Cis, ) # Flip stereochemistry if incorrect oebond_stereochemistry = ( OpenEyeToolkitWrapper._openeye_cip_bond_stereochemistry(oemol, oebond) ) if oebond_stereochemistry != bond.stereochemistry: # Flip the stereochemistry oebond.SetStereo( [oeatom1_neighbor, oeatom2_neighbor], oechem.OEBondStereo_CisTrans, oechem.OEBondStereo_Trans, ) # Verify it matches now as a sanity check oebond_stereochemistry = ( OpenEyeToolkitWrapper._openeye_cip_bond_stereochemistry( oemol, oebond ) ) if oebond_stereochemistry != bond.stereochemistry: raise Exception( "Programming error: OpenEye bond stereochemistry assumptions failed." ) # Retain conformations, if present if molecule.n_conformers != 0: oemol.DeleteConfs() for conf in molecule._conformers: # OE needs a 1 x (3*n_Atoms) double array as input flat_coords = np.zeros(shape=oemol.NumAtoms() * 3, dtype=np.float64) for index, oe_idx in map_atoms.items(): (x, y, z) = conf[index, :] / unit.angstrom flat_coords[(3 * oe_idx)] = x flat_coords[(3 * oe_idx) + 1] = y flat_coords[(3 * oe_idx) + 2] = z oecoords = oechem.OEFloatArray(flat_coords) oemol.NewConf(oecoords) # Retain charges, if present. All atoms are initialized above with a partial charge of NaN. 
if molecule._partial_charges is not None: oe_indexed_charges = np.zeros(shape=molecule.n_atoms, dtype=np.float64) for off_idx, charge in enumerate(molecule._partial_charges): oe_idx = map_atoms[off_idx] charge_unitless = charge / unit.elementary_charge oe_indexed_charges[oe_idx] = charge_unitless # TODO: This loop below fails if we try to use an "enumerate"-style loop. # It's worth investigating whether we make this assumption elsewhere in the codebase, since # the OE docs may indicate that this sort of usage is a very bad thing to do. # https://docs.eyesopen.com/toolkits/python/oechemtk/atombondindices.html#indices-for-molecule-lookup-considered-harmful # for oe_idx, oe_atom in enumerate(oemol.GetAtoms()): for oe_atom in oemol.GetAtoms(): oe_idx = oe_atom.GetIdx() oe_atom.SetPartialCharge(oe_indexed_charges[oe_idx]) # Retain properties, if present for key, value in molecule.properties.items(): oechem.OESetSDData(oemol, str(key), str(value)) # Clean Up phase # The only feature of a molecule that wasn't perceived above seemed to be ring connectivity, better to run it # here then for someone to inquire about ring sizes and get 0 when it shouldn't be oechem.OEFindRingAtomsAndBonds(oemol) return oemol def to_smiles(self, molecule, isomeric=True, explicit_hydrogens=True, mapped=False): """ Uses the OpenEye toolkit to convert a Molecule into a SMILES string. A partially mapped smiles can also be generated for atoms of interest by supplying an `atom_map` to the properties dictionary. Parameters ---------- molecule : An openff.toolkit.topology.Molecule The molecule to convert into a SMILES. isomeric: bool optional, default= True return an isomeric smiles explicit_hydrogens: bool optional, default=True return a smiles string containing all hydrogens explicitly mapped: bool optional, default=False return a explicit hydrogen mapped smiles, the atoms to be mapped can be controlled by supplying an atom map into the properties dictionary. 
            If no mapping is passed all atoms will be mapped in order, else an atom map dictionary from the current atom
            index to the map id should be supplied with no duplicates. The map ids (values) should start from 0 or 1.

        Returns
        -------
        smiles : str
            The SMILES of the input molecule.
        """
        from openeye import oechem

        oemol = self.to_openeye(molecule)

        # this sets up the default settings following the old DEFAULT flag
        # more information on flags can be found here
        # <https://docs.eyesopen.com/toolkits/python/oechemtk/OEChemConstants/OESMILESFlag.html#OEChem::OESMILESFlag>
        smiles_options = (
            oechem.OESMILESFlag_Canonical
            | oechem.OESMILESFlag_Isotopes
            | oechem.OESMILESFlag_RGroups
        )

        # check if we want an isomeric smiles
        if isomeric:
            # add the atom and bond stereo flags
            smiles_options |= (
                oechem.OESMILESFlag_AtomStereo | oechem.OESMILESFlag_BondStereo
            )

        if explicit_hydrogens:
            # add the hydrogen flag
            smiles_options |= oechem.OESMILESFlag_Hydrogens

        if mapped:
            assert explicit_hydrogens is True, (
                "Mapped smiles require all hydrogens and "
                "stereochemsitry to be defined to retain order"
            )

            # if we only want to map specific atoms check for an atom map
            atom_map = molecule._properties.get("atom_map", None)
            if atom_map is not None:
                # make sure there are no repeated indices
                map_ids = set(atom_map.values())
                if len(map_ids) < len(atom_map):
                    # duplicate map ids: fall back to mapping every atom in order
                    atom_map = None
                elif 0 in atom_map.values():
                    # we need to increment the map index
                    # (OpenEye map indices are 1-based; 0 means "unmapped")
                    for atom, map in atom_map.items():
                        atom_map[atom] = map + 1

            if atom_map is None:
                # now we need to add the atom map to the atoms
                for oeatom in oemol.GetAtoms():
                    oeatom.SetMapIdx(oeatom.GetIdx() + 1)
            else:
                for atom in oemol.GetAtoms():
                    try:
                        # try to set the atom map
                        map_idx = atom_map[atom.GetIdx()]
                        atom.SetMapIdx(map_idx)
                    except KeyError:
                        continue

            smiles_options |= oechem.OESMILESFlag_AtomMaps

        smiles = oechem.OECreateSmiString(oemol, smiles_options)
        return smiles

    def to_inchi(self, molecule, fixed_hydrogens=False):
        """
        Create an InChI string for the molecule using the OpenEye 
Toolkit. InChI is a standardised representation that does not capture tautomers unless specified using the fixed hydrogen layer. For information on InChi see here https://iupac.org/who-we-are/divisions/division-details/inchi/ Parameters ---------- molecule : An openff.toolkit.topology.Molecule The molecule to convert into a SMILES. fixed_hydrogens: bool, default=False If a fixed hydrogen layer should be added to the InChI, if `True` this will produce a non standard specific InChI string of the molecule. Returns -------- inchi: str The InChI string of the molecule. """ from openeye import oechem oemol = self.to_openeye(molecule) if fixed_hydrogens: opts = oechem.OEInChIOptions() opts.SetFixedHLayer(True) inchi = oechem.OEMolToInChI(oemol) else: inchi = oechem.OEMolToSTDInChI(oemol) return inchi def to_inchikey(self, molecule, fixed_hydrogens=False): """ Create an InChIKey for the molecule using the RDKit Toolkit. InChIKey is a standardised representation that does not capture tautomers unless specified using the fixed hydrogen layer. For information on InChi see here https://iupac.org/who-we-are/divisions/division-details/inchi/ Parameters ---------- molecule : An openff.toolkit.topology.Molecule The molecule to convert into a SMILES. fixed_hydrogens: bool, default=False If a fixed hydrogen layer should be added to the InChI, if `True` this will produce a non standard specific InChI string of the molecule. Returns -------- inchi_key: str The InChIKey representation of the molecule. """ from openeye import oechem oemol = self.to_openeye(molecule) if fixed_hydrogens: opts = oechem.OEInChIOptions() opts.SetFixedHLayer(True) inchi_key = oechem.OEMolToInChIKey(oemol) else: inchi_key = oechem.OEMolToSTDInChIKey(oemol) return inchi_key def to_iupac(self, molecule): """Generate IUPAC name from Molecule Parameters ---------- molecule : An openff.toolkit.topology.Molecule The molecule to convert into a SMILES. 
Returns ------- iupac_name : str IUPAC name of the molecule Examples -------- >>> from openff.toolkit.topology import Molecule >>> from openff.toolkit.utils import get_data_file_path >>> sdf_filepath = get_data_file_path('molecules/ethanol.sdf') >>> molecule = Molecule(sdf_filepath) >>> toolkit = OpenEyeToolkitWrapper() >>> iupac_name = toolkit.to_iupac(molecule) """ from openeye import oeiupac oemol = self.to_openeye(molecule) return oeiupac.OECreateIUPACName(oemol) def canonical_order_atoms(self, molecule): """ Canonical order the atoms in the molecule using the OpenEye toolkit. Parameters ---------- molecule: openff.toolkit.topology.Molecule The input molecule Returns ------- molecule : openff.toolkit.topology.Molecule The input molecule, with canonically-indexed atoms and bonds. """ from openeye import oechem oemol = self.to_openeye(molecule) oechem.OECanonicalOrderAtoms(oemol) oechem.OECanonicalOrderBonds(oemol) # reorder the iterator vatm = [] for atom in oemol.GetAtoms(): if atom.GetAtomicNum() != oechem.OEElemNo_H: vatm.append(atom) oemol.OrderAtoms(vatm) vbnd = [] for bond in oemol.GetBonds(): if ( bond.GetBgn().GetAtomicNum() != oechem.OEElemNo_H and bond.GetEnd().GetAtomicNum() != oechem.OEElemNo_H ): vbnd.append(bond) oemol.OrderBonds(vbnd) oemol.Sweep() for bond in oemol.GetBonds(): if bond.GetBgnIdx() > bond.GetEndIdx(): bond.SwapEnds() return self.from_openeye( oemol, allow_undefined_stereo=True, _cls=molecule.__class__ ) def from_smiles( self, smiles, hydrogens_are_explicit=False, allow_undefined_stereo=False, _cls=None, ): """ Create a Molecule from a SMILES string using the OpenEye toolkit. .. warning :: This API is experimental and subject to change. Parameters ---------- smiles : str The SMILES string to turn into a molecule hydrogens_are_explicit : bool, default = False If False, OE will perform hydrogen addition using OEAddExplicitHydrogens allow_undefined_stereo : bool, default=False Whether to accept SMILES with undefined stereochemistry. 
If False, an exception will be raised if a SMILES with undefined stereochemistry is passed into this function. _cls : class Molecule constructor Returns ------- molecule : openff.toolkit.topology.Molecule An OpenFF style molecule. """ from openeye import oechem oemol = oechem.OEGraphMol() oechem.OESmilesToMol(oemol, smiles) if not (hydrogens_are_explicit): result = oechem.OEAddExplicitHydrogens(oemol) if not result: raise ValueError( "Addition of explicit hydrogens failed in from_openeye" ) elif hydrogens_are_explicit and oechem.OEHasImplicitHydrogens(oemol): raise ValueError( f"'hydrogens_are_explicit' was specified as True, but OpenEye Toolkit interpreted " f"SMILES '{smiles}' as having implicit hydrogen. If this SMILES is intended to " f"express all explicit hydrogens in the molecule, then you should construct the " f"desired molecule as an OEMol (where oechem.OEHasImplicitHydrogens(oemol) returns " f"False), and then use Molecule.from_openeye() to create the desired OFFMol." ) # Set partial charges to None, since they couldn't have been stored in a SMILES for atom in oemol.GetAtoms(): atom.SetPartialCharge(float("nan")) molecule = self.from_openeye( oemol, _cls=_cls, allow_undefined_stereo=allow_undefined_stereo ) return molecule def from_inchi(self, inchi, allow_undefined_stereo=False, _cls=None): """ Construct a Molecule from a InChI representation Parameters ---------- inchi : str The InChI representation of the molecule. allow_undefined_stereo : bool, default=False Whether to accept InChI with undefined stereochemistry. If False, an exception will be raised if a InChI with undefined stereochemistry is passed into this function. 
_cls : class Molecule constructor Returns ------- molecule : openff.toolkit.topology.Molecule """ from openeye import oechem # This calls the same functions as OESmilesToMol oemol = oechem.OEGraphMol() oechem.OEInChIToMol(oemol, inchi) # try and catch InChI parsing fails # if there are no atoms don't build the molecule if oemol.NumAtoms() == 0: raise RuntimeError( "There was an issue parsing the InChI string, please check and try again." ) molecule = self.from_openeye( oemol, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls ) return molecule def from_iupac(self, iupac_name, allow_undefined_stereo=False, _cls=None, **kwargs): """ Construct a Molecule from an IUPAC name Parameters ---------- iupac_name : str The IUPAC or common name of the molecule. allow_undefined_stereo : bool, default=False Whether to accept a molecule name with undefined stereochemistry. If False, an exception will be raised if a molecule name with undefined stereochemistry is passed into this function. _cls : class Molecule constructor Returns ------- molecule : openff.toolkit.topology.Molecule """ from openeye import oechem, oeiupac oemol = oechem.OEMol() parsing_result = oeiupac.OEParseIUPACName(oemol, iupac_name) if not parsing_result: raise InvalidIUPACNameError( f"OpenEye failed to parse {iupac_name} as a IUPAC name" ) oechem.OETriposAtomNames(oemol) result = oechem.OEAddExplicitHydrogens(oemol) if not result: raise Exception("Addition of explicit hydrogens failed in from_iupac") molecule = self.from_openeye( oemol, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls, **kwargs ) return molecule def generate_conformers( self, molecule, n_conformers=1, rms_cutoff=None, clear_existing=True ): """ Generate molecule conformers using OpenEye Omega. .. warning :: This API is experimental and subject to change. .. todo :: * which parameters should we expose? (or can we implement a general system with \*\*kwargs?) 
* will the coordinates be returned in the OpenFF Molecule's own indexing system? Or is there a chance that they'll get reindexed when we convert the input into an OEmol? Parameters ---------- molecule : a :class:`Molecule` The molecule to generate conformers for. n_conformers : int, default=1 The maximum number of conformers to generate. rms_cutoff : simtk.Quantity-wrapped float, in units of distance, optional, default=None The minimum RMS value at which two conformers are considered redundant and one is deleted. If None, the cutoff is set to 1 Angstrom clear_existing : bool, default=True Whether to overwrite existing conformers for the molecule """ from openeye import oeomega oemol = self.to_openeye(molecule) omega = oeomega.OEOmega() omega.SetMaxConfs(n_conformers) omega.SetCanonOrder(False) omega.SetSampleHydrogens(True) omega.SetEnergyWindow(15.0) # unit? if rms_cutoff is None: omega.SetRMSThreshold(1.0) else: omega.SetRMSThreshold(rms_cutoff.value_in_unit(unit.angstrom)) # Don't generate random stereoisomer if not specified omega.SetStrictStereo(True) status = omega(oemol) if status is False: omega.SetStrictStereo(False) new_status = omega(oemol) if new_status is False: raise Exception("OpenEye Omega conformer generation failed") molecule2 = self.from_openeye( oemol, allow_undefined_stereo=True, _cls=molecule.__class__ ) if clear_existing: molecule._conformers = list() for conformer in molecule2._conformers: molecule._add_conformer(conformer) def apply_elf_conformer_selection( self, molecule: "Molecule", percentage: float = 2.0, limit: int = 10, ): """Applies the `ELF method <https://docs.eyesopen.com/toolkits/python/quacpactk/molchargetheory.html#elf-conformer-selection>`_ to select a set of diverse conformers which have minimal electrostatically strongly interacting functional groups from a molecules conformers. Notes ----- * The input molecule should have a large set of conformers already generated to select the ELF conformers from. 
    def apply_elf_conformer_selection(
        self,
        molecule: "Molecule",
        percentage: float = 2.0,
        limit: int = 10,
    ):
        """Applies the `ELF method
        <https://docs.eyesopen.com/toolkits/python/quacpactk/molchargetheory.html#elf-conformer-selection>`_
        to select a set of diverse conformers which have minimal electrostatically
        strongly interacting functional groups from a molecules conformers.

        Notes
        -----
        * The input molecule should have a large set of conformers already
          generated to select the ELF conformers from.
        * The selected conformers will be retained in the `molecule.conformers` list
          while unselected conformers will be discarded.

        See Also
        --------
        RDKitToolkitWrapper.apply_elf_conformer_selection

        Parameters
        ----------
        molecule
            The molecule which contains the set of conformers to select from.
        percentage
            The percentage of conformers with the lowest electrostatic
            interaction energies to greedily select from.
        limit
            The maximum number of conformers to select.
        """
        from openeye import oechem, oequacpac

        # Nothing to select from; leave the molecule untouched.
        if molecule.n_conformers == 0:
            return

        oe_molecule = molecule.to_openeye()

        # Select a subset of the OMEGA generated conformers using the ELF10 method.
        oe_elf_options = oequacpac.OEELFOptions()
        oe_elf_options.SetElfLimit(limit)
        oe_elf_options.SetPercent(percentage)

        oe_elf = oequacpac.OEELF(oe_elf_options)

        # Temporarily capture OpenEye's diagnostic output so we can re-route
        # warnings to our own logger (and raise on hard failures).
        output_stream = oechem.oeosstream()

        oechem.OEThrow.SetOutputStream(output_stream)
        oechem.OEThrow.Clear()

        status = oe_elf.Select(oe_molecule)

        # Restore the default OpenEye error stream.
        oechem.OEThrow.SetOutputStream(oechem.oeerr)

        # Strip OpenEye's "Warning: "/": " prefixes and the trailing newline.
        output_string = output_stream.str().decode("UTF-8")
        output_string = output_string.replace("Warning: ", "")
        output_string = re.sub("^: +", "", output_string, flags=re.MULTILINE)
        output_string = re.sub("\n$", "", output_string)

        # Check to make sure the call to OE was succesful, and re-route any
        # non-fatal warnings to the correct logger.
        if not status:
            raise RuntimeError("\n" + output_string)
        elif len(output_string) > 0:
            logger.warning(output_string)

        # Extract and store the ELF conformers on the input molecule.
        conformers = []

        for oe_conformer in oe_molecule.GetConfs():
            conformer = np.zeros((oe_molecule.NumAtoms(), 3))

            for atom_index, coordinates in oe_conformer.GetCoords().items():
                conformer[atom_index, :] = coordinates

            conformers.append(conformer * unit.angstrom)

        # Replace all conformers with only the ELF-selected set.
        molecule._conformers = conformers
_cls : class Molecule constructor Raises ------ ChargeMethodUnavailableError if the requested charge method can not be handled by this toolkit ChargeCalculationError if the charge method is supported by this toolkit, but fails """ import numpy as np from openeye import oechem, oequacpac from openff.toolkit.topology import Molecule SUPPORTED_CHARGE_METHODS = { "am1bcc": { "oe_charge_method": oequacpac.OEAM1BCCCharges, "min_confs": 1, "max_confs": 1, "rec_confs": 1, }, "am1-mulliken": { "oe_charge_method": oequacpac.OEAM1Charges, "min_confs": 1, "max_confs": 1, "rec_confs": 1, }, "gasteiger": { "oe_charge_method": oequacpac.OEGasteigerCharges, "min_confs": 0, "max_confs": 0, "rec_confs": 0, }, "mmff94": { "oe_charge_method": oequacpac.OEMMFF94Charges, "min_confs": 0, "max_confs": 0, "rec_confs": 0, }, "am1bccnosymspt": { "oe_charge_method": oequacpac.OEAM1BCCCharges, "min_confs": 1, "max_confs": 1, "rec_confs": 1, }, "am1elf10": { "oe_charge_method": oequacpac.OEELFCharges( oequacpac.OEAM1Charges(optimize=True, symmetrize=True), 10 ), "min_confs": 1, "max_confs": None, "rec_confs": 500, }, "am1bccelf10": { "oe_charge_method": oequacpac.OEAM1BCCELF10Charges, "min_confs": 1, "max_confs": None, "rec_confs": 500, }, } if partial_charge_method is None: partial_charge_method = "am1-mulliken" partial_charge_method = partial_charge_method.lower() if partial_charge_method not in SUPPORTED_CHARGE_METHODS: raise ChargeMethodUnavailableError( f"partial_charge_method '{partial_charge_method}' is not available from OpenEyeToolkitWrapper. 
" f"Available charge methods are {list(SUPPORTED_CHARGE_METHODS.keys())} " ) charge_method = SUPPORTED_CHARGE_METHODS[partial_charge_method] if _cls is None: from openff.toolkit.topology.molecule import Molecule _cls = Molecule # Make a temporary copy of the molecule, since we'll be messing with its conformers mol_copy = _cls(molecule) if use_conformers is None: if charge_method["rec_confs"] == 0: mol_copy._conformers = None else: self.generate_conformers( mol_copy, n_conformers=charge_method["rec_confs"], rms_cutoff=0.25 * unit.angstrom, ) # TODO: What's a "best practice" RMS cutoff to use here? else: mol_copy._conformers = None for conformer in use_conformers: mol_copy._add_conformer(conformer) self._check_n_conformers( mol_copy, partial_charge_method=partial_charge_method, min_confs=charge_method["min_confs"], max_confs=charge_method["max_confs"], strict_n_conformers=strict_n_conformers, ) oemol = mol_copy.to_openeye() errfs = oechem.oeosstream() oechem.OEThrow.SetOutputStream(errfs) oechem.OEThrow.Clear() # The OpenFF toolkit has always supported a version of AM1BCC with no geometry optimization # or symmetry correction. So we include this keyword to provide a special configuration of quacpac # if requested. 
if partial_charge_method == "am1bccnosymspt": optimize = False symmetrize = False quacpac_status = oequacpac.OEAssignCharges( oemol, charge_method["oe_charge_method"](optimize, symmetrize) ) else: oe_charge_method = charge_method["oe_charge_method"] if callable(oe_charge_method): oe_charge_method = oe_charge_method() quacpac_status = oequacpac.OEAssignCharges(oemol, oe_charge_method) oechem.OEThrow.SetOutputStream(oechem.oeerr) # restoring to original state # This logic handles errors encountered in #34, which can occur when using ELF10 conformer selection if not quacpac_status: oe_charge_engine = ( oequacpac.OEAM1Charges if partial_charge_method == "am1elf10" else oequacpac.OEAM1BCCCharges ) if "SelectElfPop: issue with removing trans COOH conformers" in ( errfs.str().decode("UTF-8") ): logger.warning( f"Warning: charge assignment involving ELF10 conformer selection failed due to a known bug (toolkit issue " f"#346). Downgrading to {oe_charge_engine.__name__} charge assignment for this molecule. More information" f"is available at https://github.com/openforcefield/openff-toolkit/issues/346" ) quacpac_status = oequacpac.OEAssignCharges(oemol, oe_charge_engine()) if quacpac_status is False: raise ChargeCalculationError( f'Unable to assign charges: {errfs.str().decode('UTF-8')}' ) # Extract and return charges ## TODO: Make sure atom mapping remains constant charges = unit.Quantity( np.zeros(shape=oemol.NumAtoms(), dtype=np.float64), unit.elementary_charge ) for oeatom in oemol.GetAtoms(): index = oeatom.GetIdx() charge = oeatom.GetPartialCharge() charge = charge * unit.elementary_charge charges[index] = charge molecule.partial_charges = charges def compute_partial_charges_am1bcc( self, molecule, use_conformers=None, strict_n_conformers=False ): """ Compute AM1BCC partial charges with OpenEye quacpac. This function will attempt to use the OEAM1BCCELF10 charge generation method, but may print a warning and fall back to normal OEAM1BCC if an error is encountered. 
This error is known to occur with some carboxylic acids, and is under investigation by OpenEye. .. warning :: This API is experimental and subject to change. Parameters ---------- molecule : Molecule Molecule for which partial charges are to be computed use_conformers : iterable of simtk.unit.Quantity-wrapped numpy arrays, each with shape (n_atoms, 3) and dimension of distance. Optional, default = None Coordinates to use for partial charge calculation. If None, an appropriate number of conformers will be generated. strict_n_conformers : bool, default=False Whether to raise an exception if an invalid number of conformers is provided. If this is False and an invalid number of conformers is found, a warning will be raised instead of an Exception. Returns ------- charges : numpy.array of shape (natoms) of type float The partial charges """ import warnings warnings.warn( "compute_partial_charges_am1bcc will be deprecated in an upcoming release. " "Use assign_partial_charges(partial_charge_method='am1bccelf10') instead.", DeprecationWarning, ) self.assign_partial_charges( molecule, partial_charge_method="am1bccelf10", use_conformers=use_conformers, strict_n_conformers=strict_n_conformers, ) return molecule.partial_charges def assign_fractional_bond_orders( self, molecule, bond_order_model=None, use_conformers=None, _cls=None ): """ Update and store list of bond orders this molecule. Bond orders are stored on each bond, in the `bond.fractional_bond_order` attribute. .. warning :: This API is experimental and subject to change. Parameters ---------- molecule : openff.toolkit.topology.molecule Molecule The molecule to assign wiberg bond orders to bond_order_model : str, optional, default=None The charge model to use. One of ['am1-wiberg', 'am1-wiberg-elf10', 'pm3-wiberg', 'pm3-wiberg-elf10']. If None, 'am1-wiberg' will be used. 
    def assign_fractional_bond_orders(
        self, molecule, bond_order_model=None, use_conformers=None, _cls=None
    ):
        """
        Update and store list of bond orders this molecule. Bond orders are stored on each
        bond, in the `bond.fractional_bond_order` attribute.

        .. warning :: This API is experimental and subject to change.

        Parameters
        ----------
        molecule : openff.toolkit.topology.molecule Molecule
            The molecule to assign wiberg bond orders to
        bond_order_model : str, optional, default=None
            The charge model to use. One of ['am1-wiberg', 'am1-wiberg-elf10',
            'pm3-wiberg', 'pm3-wiberg-elf10']. If None, 'am1-wiberg' will be used.
        use_conformers : iterable of simtk.unit.Quantity(np.array) with shape (n_atoms, 3) and dimension of distance, optional, default=None
            The conformers to use for fractional bond order calculation. If None, an
            appropriate number of conformers will be generated by an available
            ToolkitWrapper. If the chosen ``bond_order_model`` is an ELF variant, the ELF
            conformer selection method will be applied to the provided conformers.
        _cls : class
            Molecule constructor
        """
        from openeye import oechem, oequacpac

        if _cls is None:
            from openff.toolkit.topology.molecule import Molecule

            _cls = Molecule

        # Make a copy since we'll be messing with this molecule's conformers
        temp_mol = _cls(molecule)

        if bond_order_model is None:
            bond_order_model = "am1-wiberg"

        is_elf_method = bond_order_model in ["am1-wiberg-elf10", "pm3-wiberg-elf10"]

        if use_conformers is None:
            # ELF variants need a dense conformer pool to select from.
            temp_mol.generate_conformers(
                n_conformers=1 if not is_elf_method else 500,
                # 0.05 is the recommended RMS when generating a 'Dense' amount of
                # conformers using Omega: https://docs.eyesopen.com/toolkits/python/
                # omegatk/OEConfGenConstants/OEFragBuilderMode.html.
                rms_cutoff=None if not is_elf_method else 0.05 * unit.angstrom,
            )
        else:
            temp_mol._conformers = None
            for conformer in use_conformers:
                temp_mol._add_conformer(conformer)
        if temp_mol.n_conformers == 0:
            raise Exception(
                "No conformers present in molecule submitted for fractional bond order calculation. Consider "
                "loading the molecule from a file with geometry already present or running "
                "molecule.generate_conformers() before calling molecule.compute_wiberg_bond_orders()"
            )

        if is_elf_method:
            # Apply the ELF10 conformer selection method.
            temp_mol.apply_elf_conformer_selection()

        # Set the options to use when computing the WBOs. This is based on example at
        # https://docs.eyesopen.com/toolkits/python/quacpactk/examples_summary_wibergbondorders.html
        am1 = oequacpac.OEAM1()

        am1results = oequacpac.OEAM1Results()

        am1options = am1.GetOptions()

        if bond_order_model.startswith("am1-wiberg"):
            am1options.SetSemiMethod(oequacpac.OEMethodType_AM1)
        elif bond_order_model.startswith("pm3-wiberg"):
            # TODO: Make sure that modifying am1options actually works
            am1options.SetSemiMethod(oequacpac.OEMethodType_PM3)
        else:
            raise ValueError(
                f"Bond order model '{bond_order_model}' is not supported by "
                f"OpenEyeToolkitWrapper. Supported models are ['am1-wiberg', "
                f"'am1-wiberg-elf10', 'pm3-wiberg', 'pm3-wiberg-elf10']."
            )

        # Convert the conformers into OE friendly objects to make setting them one
        # at a time easier.
        oe_conformers = [
            oechem.OEFloatArray(conformer.value_in_unit(unit.angstrom).flatten())
            for conformer in temp_mol.conformers
        ]

        oemol = self.to_openeye(temp_mol)
        # Accumulate one bond order per conformer per bond; averaged below.
        # (`defaultdict` and `np` are assumed imported at module level.)
        bond_orders = defaultdict(list)

        for oe_conformer in oe_conformers:

            oemol.DeleteConfs()
            oemol.NewConf(oe_conformer)

            status = am1.CalcAM1(am1results, oemol)

            if status is False:
                raise Exception(
                    "Unable to assign charges (in the process of calculating "
                    "fractional bond orders)"
                )

            for bond in oemol.GetBonds():
                bond_orders[bond.GetIdx()].append(
                    am1results.GetBondOrder(bond.GetBgnIdx(), bond.GetEndIdx())
                )

        # TODO: Will bonds always map back to the same index? Consider doing a
        #       topology mapping.
        for bond_idx, conformer_bond_orders in bond_orders.items():
            # Get bond order
            order = np.mean(conformer_bond_orders)

            mol_bond = molecule._bonds[bond_idx]
            mol_bond.fractional_bond_order = order
tagged_atom_connectivity : tuple of tuples of int, shape n_tagged_bonds x 2 A tuple of tuples, where each inner tuple is a pair of tagged atoms (tag_idx_1, tag_idx_2) which are bonded. The inner tuples are ordered smallest-to-largest, and the tuple of tuples is ordered lexically. So the return value for an improper torsion would be ((1, 2), (2, 3), (2, 4)). Raises ------ SMIRKSParsingError If OpenEye toolkit was unable to parse the provided smirks/tagged smarts """ from openeye import oechem from openff.toolkit.typing.chemistry import SMIRKSParsingError qmol = oechem.OEQMol() status = oechem.OEParseSmarts(qmol, smarts) if not status: raise SMIRKSParsingError( f"OpenEye Toolkit was unable to parse SMIRKS {smarts}" ) unique_tags = set() connections = set() for at1 in qmol.GetAtoms(): if at1.GetMapIdx() == 0: continue unique_tags.add(at1.GetMapIdx()) for at2 in at1.GetAtoms(): if at2.GetMapIdx() == 0: continue cxn_to_add = sorted([at1.GetMapIdx(), at2.GetMapIdx()]) connections.add(tuple(cxn_to_add)) connections = tuple(sorted(list(connections))) unique_tags = tuple(sorted(list(unique_tags))) return tuple(unique_tags), tuple(connections) @staticmethod def _find_smarts_matches( oemol, smarts, aromaticity_model=DEFAULT_AROMATICITY_MODEL ): """Find all sets of atoms in the provided OpenEye molecule that match the provided SMARTS string. Parameters ---------- oemol : openeye.oechem.OEMol or similar oemol to process with the SMIRKS in order to find matches smarts : str SMARTS string with any number of sequentially tagged atoms. If there are N tagged atoms numbered 1..N, the resulting matches will be N-tuples of atoms that match the corresponding tagged atoms. aromaticity_model : str, optional, default=None OpenEye aromaticity model designation as a string, such as ``OEAroModel_MDL``. Molecule is prepared with this aromaticity model prior to querying. 
    @staticmethod
    def _find_smarts_matches(
        oemol, smarts, aromaticity_model=DEFAULT_AROMATICITY_MODEL
    ):
        """Find all sets of atoms in the provided OpenEye molecule that match the provided SMARTS string.

        Parameters
        ----------
        oemol : openeye.oechem.OEMol or similar
            oemol to process with the SMIRKS in order to find matches
        smarts : str
            SMARTS string with any number of sequentially tagged atoms.
            If there are N tagged atoms numbered 1..N, the resulting matches will be
            N-tuples of atoms that match the corresponding tagged atoms.
        aromaticity_model : str, optional, default=None
            OpenEye aromaticity model designation as a string, such as ``OEAroModel_MDL``.
            Molecule is prepared with this aromaticity model prior to querying.

        Returns
        -------
        matches : list of tuples of atoms indices within the ``oemol``
            matches[index] is an N-tuple of atom numbers from the ``oemol``
            Matches are returned in no guaranteed order.
            # TODO: What is returned if no matches are found? An empty list, or None?
            # TODO: Ensure that SMARTS numbers 1, 2, 3... are rendered into order of returnd matches indexed by 0, 1, 2...

        .. notes ::

           * Raises ``LicenseError`` if valid OpenEye tools license is not found, rather than causing program to terminate
           * Raises ``ValueError`` if ``smarts`` query is malformed

        """
        from openeye import oechem
        from openeye.oechem import OESubSearch

        # Make a copy of molecule so we don't influence original (probably safer than deepcopy per C Bayly)
        mol = oechem.OEMol(oemol)

        # Set up query
        qmol = oechem.OEQMol()
        if not oechem.OEParseSmarts(qmol, smarts):
            raise ValueError(f"Error parsing SMARTS '{smarts}'")

        # Apply aromaticity model
        if type(aromaticity_model) == str:
            # Check if the user has provided a manually-specified aromaticity_model
            if hasattr(oechem, aromaticity_model):
                oearomodel = getattr(oechem, aromaticity_model)
            else:
                raise ValueError(
                    "Error: provided aromaticity model not recognized by oechem."
                )
        else:
            raise ValueError("Error: provided aromaticity model must be a string.")

        # OEPrepareSearch will clobber our desired aromaticity model if we don't sync up mol and qmol ahead of time
        # Prepare molecule
        oechem.OEClearAromaticFlags(mol)
        oechem.OEAssignAromaticFlags(mol, oearomodel)

        # If aromaticity model was provided, prepare query molecule
        oechem.OEClearAromaticFlags(qmol)
        oechem.OEAssignAromaticFlags(qmol, oearomodel)
        oechem.OEAssignHybridization(mol)
        oechem.OEAssignHybridization(qmol)

        # Build list of matches
        # TODO: The MoleculeImage mapping should preserve ordering of template molecule for equivalent atoms
        #       and speed matching for larger molecules.
        unique = False  # We require all matches, not just one of each kind
        substructure_search = OESubSearch(qmol)
        # 0 means "no limit" — return every match.
        substructure_search.SetMaxMatches(0)
        oechem.OEPrepareSearch(mol, substructure_search)
        matches = list()
        for match in substructure_search.Match(mol, unique):
            # Compile list of atom indices that match the pattern tags
            atom_indices = dict()
            for matched_atom in match.GetAtoms():
                if matched_atom.pattern.GetMapIdx() != 0:
                    # Map tag N (1-based) to result position N-1 (0-based).
                    atom_indices[
                        matched_atom.pattern.GetMapIdx() - 1
                    ] = matched_atom.target.GetIdx()
            # Compress into list
            atom_indices = [atom_indices[index] for index in range(len(atom_indices))]
            # Convert to tuple
            matches.append(tuple(atom_indices))
        return matches

    def find_smarts_matches(self, molecule, smarts, aromaticity_model="OEAroModel_MDL"):
        """
        Find all SMARTS matches for the specified molecule, using the specified aromaticity model.

        .. warning :: This API is experimental and subject to change.

        Parameters
        ----------
        molecule : openff.toolkit.topology.Molecule
            The molecule for which all specified SMARTS matches are to be located
        smarts : str
            SMARTS string with optional SMIRKS-style atom tagging
        aromaticity_model : str, optional, default='OEAroModel_MDL'
            Molecule is prepared with this aromaticity model prior to querying.

        .. note :: Currently, the only supported ``aromaticity_model`` is ``OEAroModel_MDL``

        """
        oemol = self.to_openeye(molecule)
        return self._find_smarts_matches(
            oemol, smarts, aromaticity_model=aromaticity_model
        )
+ module_name) try: license_func = OpenEyeToolkitWrapper._license_functions[module_name] except KeyError: # TODO: Custom exception raise Exception(f"we do not currently use {module_name}") # TODO: Custom exception assert getattr(module, license_func)() return function(*args, **kwargs) return wrapper return inner_decorator class RDKitToolkitWrapper(ToolkitWrapper): """ RDKit toolkit wrapper .. warning :: This API is experimental and subject to change. """ _toolkit_name = "The RDKit" _toolkit_installation_instructions = ( "A conda-installable version of the free and open source RDKit cheminformatics " "toolkit can be found at: https://anaconda.org/rdkit/rdkit" ) def __init__(self): super().__init__() self._toolkit_file_read_formats = ["SDF", "MOL", "SMI"] # TODO: Add TDT support if not self.is_available(): raise ToolkitUnavailableException( f"The required toolkit {self._toolkit_name} is not " f"available. {self._toolkit_installation_instructions}" ) else: from rdkit import __version__ as rdkit_version self._toolkit_version = rdkit_version from rdkit import Chem # we have to make sure the toolkit can be loaded before formatting this dict # Note any new file write formats should be added here only self._toolkit_file_write_formats = { "SDF": Chem.SDWriter, "MOL": Chem.SDWriter, "SMI": Chem.SmilesWriter, "PDB": Chem.PDBWriter, "TDT": Chem.TDTWriter, } @property def toolkit_file_write_formats(self): """ List of file formats that this toolkit can write. """ return list(self._toolkit_file_write_formats.keys()) @classmethod def is_available(cls): """ Check whether the RDKit toolkit can be imported Returns ------- is_installed : bool True if RDKit is installed, False otherwise. 
""" if cls._is_available is None: try: importlib.import_module("rdkit", "Chem") except ImportError: cls._is_available = False else: cls._is_available = True return cls._is_available def from_object(self, obj, allow_undefined_stereo=False, _cls=None): """ If given an rdchem.Mol (or rdchem.Mol-derived object), this function will load it into an openff.toolkit.topology.molecule. Otherwise, it will return False. Parameters ---------- obj : A rdchem.Mol-derived object An object to be type-checked and converted into a Molecule, if possible. allow_undefined_stereo : bool, default=False Whether to accept molecules with undefined stereocenters. If False, an exception will be raised if a molecule with undefined stereochemistry is passed into this function. _cls : class Molecule constructor Returns ------- Molecule or False An openff.toolkit.topology.molecule Molecule. Raises ------ NotImplementedError If the object could not be converted into a Molecule. """ # TODO: Add tests for the from_object functions from rdkit import Chem if _cls is None: from openff.toolkit.topology.molecule import Molecule _cls = Molecule if isinstance(obj, Chem.rdchem.Mol): return _cls.from_rdkit(obj, allow_undefined_stereo=allow_undefined_stereo) raise NotImplementedError( "Cannot create Molecule from {} object".format(type(obj)) ) def from_pdb_and_smiles( self, file_path, smiles, allow_undefined_stereo=False, _cls=None ): """ Create a Molecule from a pdb file and a SMILES string using RDKit. Requires RDKit to be installed. The molecule is created and sanitised based on the SMILES string, we then find a mapping between this molecule and one from the PDB based only on atomic number and connections. The SMILES molecule is then reindex to match the PDB, the conformer is attached and the molecule returned. 
Parameters ---------- file_path: str PDB file path smiles : str a valid smiles string for the pdb, used for seterochemistry and bond order allow_undefined_stereo : bool, default=False If false, raises an exception if oemol contains undefined stereochemistry. _cls : class Molecule constructor Returns -------- molecule : openff.toolkit.Molecule (or _cls() type) An OFFMol instance with ordering the same as used in the PDB file. Raises ------ InvalidConformerError : if the SMILES and PDB molecules are not isomorphic. """ from rdkit import Chem from openff.toolkit.topology.molecule import InvalidConformerError, Molecule # Make the molecule from smiles offmol = self.from_smiles( smiles, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls ) # Make another molecule from the PDB, allow stero errors here they are expected pdbmol = self.from_rdkit( Chem.MolFromPDBFile(file_path, removeHs=False), allow_undefined_stereo=True, hydrogens_are_explicit=True, _cls=_cls, ) # check isomorphic and get the mapping if true the mapping will be # Dict[pdb_index: offmol_index] sorted by pdb_index isomorphic, mapping = _cls.are_isomorphic( pdbmol, offmol, return_atom_map=True, aromatic_matching=False, formal_charge_matching=False, bond_order_matching=False, atom_stereochemistry_matching=False, bond_stereochemistry_matching=False, ) if mapping is not None: new_mol = offmol.remap(mapping) # the pdb conformer is in the correct order so just attach it here new_mol._add_conformer(pdbmol.conformers[0]) return new_mol else: raise InvalidConformerError("The PDB and SMILES structures do not match.") def from_file( self, file_path, file_format, allow_undefined_stereo=False, _cls=None ): """ Create an openff.toolkit.topology.Molecule from a file using this toolkit. Parameters ---------- file_path : str The file to read the molecule from file_format : str Format specifier, usually file suffix (eg. 'MOL2', 'SMI') Note that not all toolkits support all formats. 
            Check ToolkitWrapper.toolkit_file_read_formats for details.
        allow_undefined_stereo : bool, default=False
            If false, raises an exception if oemol contains undefined stereochemistry.
        _cls : class
            Molecule constructor

        Returns
        -------
        molecules : iterable of Molecules
            a list of Molecule objects is returned.
        """
        from rdkit import Chem

        file_format = file_format.upper()

        mols = list()
        if (file_format == "MOL") or (file_format == "SDF"):
            # Sanitization is deferred so it can be done with custom flags below.
            for rdmol in Chem.SupplierFromFilename(
                file_path, removeHs=False, sanitize=False, strictParsing=True
            ):
                if rdmol is None:
                    continue

                # Sanitize the molecules (fails on nitro groups)
                try:
                    Chem.SanitizeMol(
                        rdmol,
                        Chem.SANITIZE_ALL
                        ^ Chem.SANITIZE_SETAROMATICITY
                        ^ Chem.SANITIZE_ADJUSTHS,
                    )
                    Chem.AssignStereochemistryFrom3D(rdmol)
                except ValueError as e:
                    # Skip molecules RDKit cannot sanitize rather than abort the file.
                    logger.warning(rdmol.GetProp("_Name") + " " + str(e))
                    continue
                Chem.SetAromaticity(rdmol, Chem.AromaticityModel.AROMATICITY_MDL)
                mol = self.from_rdkit(
                    rdmol, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls
                )
                mols.append(mol)

        elif file_format == "SMI":
            # TODO: We have to do some special stuff when we import SMILES (currently
            #       just adding H's, but could get fancier in the future). It might be
            #       worthwhile to parse the SMILES file ourselves and pass each SMILES
            #       through the from_smiles function instead
            for rdmol in Chem.SmilesMolSupplier(file_path, titleLine=False):
                rdmol = Chem.AddHs(rdmol)
                mol = self.from_rdkit(
                    rdmol, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls
                )
                mols.append(mol)

        elif file_format == "PDB":
            raise Exception(
                "RDKit can not safely read PDBs on their own. Information about bond order and aromaticity "
                "is likely to be lost. To read a PDB using RDKit use Molecule.from_pdb_and_smiles()"
            )
            # TODO: See if we can implement PDB+mol/smi combinations to get complete bond information.
# testing to see if we can make a molecule from smiles and then use the PDB conformer as the geometry # and just reorder the molecule # https://github.com/openforcefield/openff-toolkit/issues/121 # rdmol = Chem.MolFromPDBFile(file_path, removeHs=False) # mol = Molecule.from_rdkit(rdmol, _cls=_cls) # mols.append(mol) # TODO: Add SMI, TDT(?) support return mols def from_file_obj( self, file_obj, file_format, allow_undefined_stereo=False, _cls=None ): """ Return an openff.toolkit.topology.Molecule from a file-like object (an object with a ".read()" method using this toolkit. .. warning :: This API is experimental and subject to change. Parameters ---------- file_obj : file-like object The file-like object to read the molecule from file_format : str Format specifier, usually file suffix (eg. 'MOL2', 'SMI') Note that not all toolkits support all formats. Check ToolkitWrapper.toolkit_file_read_formats for details. allow_undefined_stereo : bool, default=False If false, raises an exception if oemol contains undefined stereochemistry. _cls : class Molecule constructor Returns ------- molecules : Molecule or list of Molecules a list of Molecule objects is returned. """ from rdkit import Chem mols = [] if (file_format == "MOL") or (file_format == "SDF"): # TODO: Iterate over all mols in file_data for rdmol in Chem.ForwardSDMolSupplier(file_obj): mol = self.from_rdkit(rdmol, _cls=_cls) mols.append(mol) if file_format == "SMI": # TODO: Find a cleaner way to parse SMILES lines file_data = file_obj.read() lines = [line.strip() for line in file_data.split("\n")] # remove blank lines lines.remove("") for line in lines: mol = self.from_smiles(line, _cls=_cls) mols.append(mol) elif file_format == "PDB": raise Exception( "RDKit can not safely read PDBs on their own. Information about bond order and aromaticity " "is likely to be lost. 
To read a PDB using RDKit use Molecule.from_pdb_and_smiles()" ) # TODO: See if we can implement PDB+mol/smi combinations to get complete bond information. # https://github.com/openforcefield/openff-toolkit/issues/121 # file_data = file_obj.read() # rdmol = Chem.MolFromPDBBlock(file_data) # mol = Molecule.from_rdkit(rdmol, _cls=_cls) # mols.append(mol) # TODO: TDT file support return mols def to_file_obj(self, molecule, file_obj, file_format): """ Writes an OpenFF Molecule to a file-like object Parameters ---------- molecule : an OpenFF Molecule The molecule to write file_obj The file-like object to write to file_format The format for writing the molecule data Returns ------- """ file_format = file_format.upper() rdmol = self.to_rdkit(molecule) try: writer = self._toolkit_file_write_formats[file_format](file_obj) writer.write(rdmol) writer.close() # if we can not write to that file type catch the error here except KeyError: raise ValueError( f"The requested file type ({file_format}) is not supported to be written using " f"RDKitToolkitWrapper." ) def to_file(self, molecule, file_path, file_format): """ Writes an OpenFF Molecule to a file-like object Parameters ---------- molecule : an OpenFF Molecule The molecule to write file_path The file path to write to file_format The format for writing the molecule data Returns ------ """ # open a file object and pass to the object writer with open(file_path, "w") as file_obj: self.to_file_obj( molecule=molecule, file_obj=file_obj, file_format=file_format ) def enumerate_stereoisomers( self, molecule, undefined_only=False, max_isomers=20, rationalise=True ): """ Enumerate the stereocenters and bonds of the current molecule. 
        Parameters
        ----------
        molecule: openff.toolkit.topology.Molecule
            The molecule whose state we should enumerate
        undefined_only: bool optional, default=False
            If we should enumerate all stereocenters and bonds or only those
            with undefined stereochemistry
        max_isomers: int optional, default=20
            The maximum amount of molecules that should be returned
        rationalise: bool optional, default=True
            If we should try to build and rationalise the molecule to ensure it
            can exist

        Returns
        -------
        molecules: List[openff.toolkit.topology.Molecule]
            A list of openff.toolkit.topology.Molecule instances
        """
        from rdkit import Chem
        from rdkit.Chem.EnumerateStereoisomers import (
            EnumerateStereoisomers,
            StereoEnumerationOptions,
        )

        # create the molecule
        rdmol = self.to_rdkit(molecule=molecule)

        # in case any bonds/centers are missing stereo chem flag it here
        Chem.AssignStereochemistry(
            rdmol, cleanIt=True, force=True, flagPossibleStereoCenters=True
        )
        Chem.FindPotentialStereoBonds(rdmol)

        # set up the options
        stereo_opts = StereoEnumerationOptions(
            tryEmbedding=rationalise,
            onlyUnassigned=undefined_only,
            maxIsomers=max_isomers,
        )

        isomers = tuple(EnumerateStereoisomers(rdmol, options=stereo_opts))

        molecules = []
        for isomer in isomers:
            # isomer has CIS/TRANS tags so convert back to E/Z
            Chem.SetDoubleBondNeighborDirections(isomer)
            Chem.AssignStereochemistry(isomer, force=True, cleanIt=True)

            mol = self.from_rdkit(isomer, _cls=molecule.__class__)
            # Exclude the input molecule itself from the returned isomers.
            if mol != molecule:
                molecules.append(mol)

        return molecules

    def enumerate_tautomers(self, molecule, max_states=20):
        """
        Enumerate the possible tautomers of the current molecule.

        Parameters
        ----------
        molecule: openff.toolkit.topology.Molecule
            The molecule whose state we should enumerate
        max_states: int optional, default=20
            The maximum amount of molecules that should be returned

        Returns
        -------
        molecules: List[openff.toolkit.topology.Molecule]
            A list of openff.toolkit.topology.Molecule instances not including
            the input molecule.
""" from rdkit import Chem from rdkit.Chem.MolStandardize import rdMolStandardize enumerator = rdMolStandardize.TautomerEnumerator() enumerator.SetMaxTautomers(max_states) rdmol = Chem.RemoveHs(molecule.to_rdkit()) tautomers = enumerator.Enumerate(rdmol) # make a list of OpenFF molecules excluding the input molecule molecules = [] for taut in tautomers: taut_hs = Chem.AddHs(taut) mol = self.from_smiles( Chem.MolToSmiles(taut_hs), allow_undefined_stereo=True ) if mol != molecule: molecules.append(mol) return molecules[:max_states] def canonical_order_atoms(self, molecule): """ Canonical order the atoms in the molecule using the RDKit. Parameters ---------- molecule: openff.toolkit.topology.Molecule The input molecule Returns ------- molecule : openff.toolkit.topology.Molecule The input molecule, with canonically-indexed atoms and bonds. """ from rdkit import Chem rdmol = self.to_rdkit(molecule) # get the canonical ordering with hydrogens first # this is the default behaviour of RDKit atom_order = list(Chem.CanonicalRankAtoms(rdmol, breakTies=True)) heavy_atoms = rdmol.GetNumHeavyAtoms() hydrogens = rdmol.GetNumAtoms() - heavy_atoms # now go through and change the rankings to get the heavy atoms first if hydrogens are present if hydrogens != 0: for i in range(len(atom_order)): if rdmol.GetAtomWithIdx(i).GetAtomicNum() != 1: atom_order[i] -= hydrogens else: atom_order[i] += heavy_atoms # make an atom mapping from the atom_order and remap the molecule atom_mapping = dict((i, rank) for i, rank in enumerate(atom_order)) return molecule.remap(atom_mapping, current_to_new=True) def to_smiles(self, molecule, isomeric=True, explicit_hydrogens=True, mapped=False): """ Uses the RDKit toolkit to convert a Molecule into a SMILES string. A partially mapped smiles can also be generated for atoms of interest by supplying an `atom_map` to the properties dictionary. Parameters ---------- molecule : An openff.toolkit.topology.Molecule The molecule to convert into a SMILES. 
        isomeric: bool optional, default= True
            return an isomeric smiles
        explicit_hydrogens: bool optional, default=True
            return a smiles string containing all hydrogens explicitly
        mapped: bool optional, default=False
            return a explicit hydrogen mapped smiles, the atoms to be mapped can be
            controlled by supplying an atom map into the properties dictionary. If
            no mapping is passed all atoms will be mapped in order, else an atom map
            dictionary from the current atom index to the map id should be supplied
            with no duplicates. The map ids (values) should start from 0 or 1.

        Returns
        -------
        smiles : str
            The SMILES of the input molecule.
        """
        from rdkit import Chem

        rdmol = self.to_rdkit(molecule)

        if not explicit_hydrogens:
            # remove the hydrogens from the molecule
            rdmol = Chem.RemoveHs(rdmol)

        if mapped:
            assert explicit_hydrogens is True, (
                "Mapped smiles require all hydrogens and "
                "stereochemistry to be defined to retain order"
            )

            # if we only want to map specific atoms check for an atom map
            atom_map = molecule._properties.get("atom_map", None)
            if atom_map is not None:
                # make sure there are no repeated indices
                map_ids = set(atom_map.values())
                if len(map_ids) < len(atom_map):
                    # duplicate map ids: fall back to mapping every atom in order
                    atom_map = None
                elif 0 in atom_map.values():
                    # we need to increment the map index as RDKit reserves 0
                    # for "no mapping" (NOTE: `map` shadows the builtin here)
                    for atom, map in atom_map.items():
                        atom_map[atom] = map + 1

            if atom_map is None:
                # now we need to add the indexing to the rdmol to get it in the smiles
                for atom in rdmol.GetAtoms():
                    # the mapping must start from 1, as RDKit uses 0 to represent no mapping.
                    atom.SetAtomMapNum(atom.GetIdx() + 1)
            else:
                for atom in rdmol.GetAtoms():
                    try:
                        # try to set the atom map; atoms absent from the map stay unmapped
                        map_idx = atom_map[atom.GetIdx()]
                        atom.SetAtomMapNum(map_idx)
                    except KeyError:
                        continue

        return Chem.MolToSmiles(
            rdmol, isomericSmiles=isomeric, allHsExplicit=explicit_hydrogens
        )

    def from_smiles(
        self,
        smiles,
        hydrogens_are_explicit=False,
        allow_undefined_stereo=False,
        _cls=None,
    ):
        """
        Create a Molecule from a SMILES string using the RDKit toolkit.

        ..
        warning :: This API is experimental and subject to change.

        Parameters
        ----------
        smiles : str
            The SMILES string to turn into a molecule
        hydrogens_are_explicit : bool, default=False
            If False, RDKit will perform hydrogen addition using Chem.AddHs
        allow_undefined_stereo : bool, default=False
            Whether to accept SMILES with undefined stereochemistry. If False, an
            exception will be raised if a SMILES with undefined stereochemistry is
            passed into this function.
        _cls : class
            Molecule constructor

        Returns
        -------
        molecule : openff.toolkit.topology.Molecule
            An OpenFF style molecule.
        """
        from rdkit import Chem

        rdmol = Chem.MolFromSmiles(smiles, sanitize=False)

        # strip the atom map from the molecule if it has one
        # so we don't affect the stereochemistry tags
        for atom in rdmol.GetAtoms():
            if atom.GetAtomMapNum() != 0:
                # set the map back to zero but hide the index in the atom prop data
                atom.SetProp("_map_idx", str(atom.GetAtomMapNum()))
                # set it back to zero
                atom.SetAtomMapNum(0)

        # Chem.SanitizeMol calls updatePropertyCache so we don't need to call it ourselves
        # https://www.rdkit.org/docs/cppapi/namespaceRDKit_1_1MolOps.html#a8d831787aaf2d65d9920c37b25b476f5
        Chem.SanitizeMol(
            rdmol,
            Chem.SANITIZE_ALL
            ^ Chem.SANITIZE_ADJUSTHS
            ^ Chem.SANITIZE_SETAROMATICITY,
        )
        Chem.SetAromaticity(rdmol, Chem.AromaticityModel.AROMATICITY_MDL)

        # Chem.MolFromSmiles adds bond directions (i.e. ENDDOWNRIGHT/ENDUPRIGHT), but
        # doesn't set bond.GetStereo(). We need to call AssignStereochemistry for that.
        Chem.AssignStereochemistry(rdmol)

        # Throw an exception/warning if there is unspecified stereochemistry.
if not allow_undefined_stereo: self._detect_undefined_stereo( rdmol, err_msg_prefix="Unable to make OFFMol from SMILES: " ) # Add explicit hydrogens if they aren't there already if not hydrogens_are_explicit: rdmol = Chem.AddHs(rdmol) elif hydrogens_are_explicit: for atom_idx in range(rdmol.GetNumAtoms()): atom = rdmol.GetAtomWithIdx(atom_idx) if atom.GetNumImplicitHs() != 0: raise ValueError( f"'hydrogens_are_explicit' was specified as True, but RDKit toolkit interpreted " f"SMILES '{smiles}' as having implicit hydrogen. If this SMILES is intended to " f"express all explicit hydrogens in the molecule, then you should construct the " f"desired molecule as an RDMol with no implicit hydrogens, and then use " f"Molecule.from_rdkit() to create the desired OFFMol." ) molecule = self.from_rdkit( rdmol, _cls=_cls, allow_undefined_stereo=allow_undefined_stereo, hydrogens_are_explicit=hydrogens_are_explicit, ) return molecule def from_inchi(self, inchi, allow_undefined_stereo=False, _cls=None): """ Construct a Molecule from a InChI representation Parameters ---------- inchi : str The InChI representation of the molecule. allow_undefined_stereo : bool, default=False Whether to accept InChI with undefined stereochemistry. If False, an exception will be raised if a InChI with undefined stereochemistry is passed into this function. _cls : class Molecule constructor Returns ------- molecule : openff.toolkit.topology.Molecule """ from rdkit import Chem # this seems to always remove the hydrogens rdmol = Chem.MolFromInchi(inchi, sanitize=False, removeHs=False) # try and catch an InChI parsing error if rdmol is None: raise RuntimeError( "There was an issue parsing the InChI string, please check and try again." ) # process the molecule # TODO do we need this with inchi? 
        rdmol.UpdatePropertyCache(strict=False)
        Chem.SanitizeMol(
            rdmol,
            Chem.SANITIZE_ALL
            ^ Chem.SANITIZE_ADJUSTHS
            ^ Chem.SANITIZE_SETAROMATICITY,
        )
        Chem.SetAromaticity(rdmol, Chem.AromaticityModel.AROMATICITY_MDL)

        # add hydrogens back here
        rdmol = Chem.AddHs(rdmol)

        molecule = self.from_rdkit(
            rdmol, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls
        )

        return molecule

    def generate_conformers(
        self, molecule, n_conformers=1, rms_cutoff=None, clear_existing=True, _cls=None
    ):
        """
        Generate molecule conformers using RDKit.

        .. warning :: This API is experimental and subject to change.

        .. todo ::

           * which parameters should we expose? (or can we implement a general
             system with \*\*kwargs?)
           * will the coordinates be returned in the OpenFF Molecule's own indexing
             system? Or is there a chance that they'll get reindexed when we convert
             the input into an RDMol?

        Parameters
        ----------
        molecule : a :class:`Molecule`
            The molecule to generate conformers for.
        n_conformers : int, default=1
            Maximum number of conformers to generate.
        rms_cutoff : simtk.Quantity-wrapped float, in units of distance, optional, default=None
            The minimum RMS value at which two conformers are considered redundant
            and one is deleted. If None, the cutoff is set to 1 Angstrom
        clear_existing : bool, default=True
            Whether to overwrite existing conformers for the molecule.
        _cls : class
            Molecule constructor
        """
        from rdkit.Chem import AllChem

        if rms_cutoff is None:
            rms_cutoff = 1.0 * unit.angstrom
        rdmol = self.to_rdkit(molecule)
        # TODO: This generates way more conformations than omega, given the same
        #       nConfs and RMS threshold. Is there some way to set an energy cutoff
        #       as well?
        # randomSeed fixed for reproducible embeddings
        AllChem.EmbedMultipleConfs(
            rdmol,
            numConfs=n_conformers,
            pruneRmsThresh=rms_cutoff / unit.angstrom,
            randomSeed=1,
            # params=AllChem.ETKDG()
        )
        # Round-trip through from_rdkit to pull the embedded conformers back into
        # an OpenFF molecule; stereo errors are tolerated for the copy.
        molecule2 = self.from_rdkit(
            rdmol, allow_undefined_stereo=True, _cls=molecule.__class__
        )
        if clear_existing:
            molecule._conformers = list()

        for conformer in molecule2._conformers:
            molecule._add_conformer(conformer)

    def assign_partial_charges(
        self,
        molecule,
        partial_charge_method=None,
        use_conformers=None,
        strict_n_conformers=False,
        _cls=None,
    ):
        """
        Compute partial charges with RDKit, and assign
        the new values to the partial_charges attribute.

        .. warning :: This API is experimental and subject to change.

        Parameters
        ----------
        molecule : openff.toolkit.topology.Molecule
            Molecule for which partial charges are to be computed
        partial_charge_method : str, optional, default=None
            The charge model to use. One of ['mmff94']. If None, 'mmff94' will
            be used.

            * 'mmff94': Applies partial charges using the Merck Molecular Force Field
              (MMFF). This method does not make use of conformers, and hence
              ``use_conformers`` and ``strict_n_conformers`` will not impact the
              partial charges produced.
        use_conformers : iterable of simtk.unit.Quantity-wrapped numpy arrays, each
            with shape (n_atoms, 3) and dimension of distance. Optional, default = None
            Coordinates to use for partial charge calculation. If None, an
            appropriate number of conformers will be generated.
        strict_n_conformers : bool, default=False
            Whether to raise an exception if an invalid number of conformers is
            provided for the given charge method. If this is False and an invalid
            number of conformers is found, a warning will be raised.
        _cls : class
            Molecule constructor

        Raises
        ------
        ChargeMethodUnavailableError
            if the requested charge method can not be handled by this toolkit

        ChargeCalculationError
            if the charge method is supported by this toolkit, but fails
        """
        import numpy as np
        from rdkit.Chem import AllChem

        SUPPORTED_CHARGE_METHODS = {"mmff94"}

        if partial_charge_method is None:
            partial_charge_method = "mmff94"

        partial_charge_method = partial_charge_method.lower()

        if partial_charge_method not in SUPPORTED_CHARGE_METHODS:
            raise ChargeMethodUnavailableError(
                f"partial_charge_method '{partial_charge_method}' is not available from RDKitToolkitWrapper. "
                f"Available charge methods are {list(SUPPORTED_CHARGE_METHODS)} "
            )

        rdkit_molecule = molecule.to_rdkit()
        charges = None

        if partial_charge_method == "mmff94":

            mmff_properties = AllChem.MMFFGetMoleculeProperties(
                rdkit_molecule, "MMFF94"
            )
            charges = np.array(
                [
                    mmff_properties.GetMMFFPartialCharge(i)
                    for i in range(molecule.n_atoms)
                ]
            )

        molecule.partial_charges = charges * unit.elementary_charge

    @classmethod
    def _elf_is_problematic_conformer(
        cls, molecule: "Molecule", conformer: unit.Quantity
    ) -> Tuple[bool, Optional[str]]:
        """A function which checks if a particular conformer is known to be
        problematic when computing ELF partial charges.

        Currently this includes conformers which:

        * contain a trans-COOH configuration. The trans conformer is discarded
          because it leads to strong electrostatic interactions when assigning
          charges, and these result in unreasonable charges. Downstream
          calculations have observed up to a 4 log unit error in water-octanol
          logP calculations when using charges assigned from trans conformers.

        Returns
        -------
        A tuple of a bool stating whether the conformer is problematic and, if it
        is, a string message explaining why. If the conformer is not problematic,
        the second return value will be none.
        """
        from rdkit.Chem.rdMolTransforms import GetDihedralRad

        # Create a copy of the molecule which contains only this conformer.
        molecule_copy = copy.deepcopy(molecule)
        molecule_copy._conformers = [conformer]

        rdkit_molecule = molecule_copy.to_rdkit()

        # Check for trans-COOH configurations
        carboxylic_acid_matches = cls._find_smarts_matches(
            rdkit_molecule, "[#6X3:2](=[#8:1])(-[#8X2H1:3]-[#1:4])"
        )

        for match in carboxylic_acid_matches:

            dihedral_angle = GetDihedralRad(rdkit_molecule.GetConformer(0), *match)

            if dihedral_angle > np.pi / 2.0:
                # Discard the 'trans' conformer.
                return (
                    True,
                    "Molecules which contain COOH functional groups in a trans "
                    "configuration are discarded by the ELF method.",
                )

        return False, None

    @classmethod
    def _elf_prune_problematic_conformers(
        cls, molecule: "Molecule"
    ) -> List[unit.Quantity]:
        """A function which attempts to remove conformers which are known to be
        problematic when computing ELF partial charges.

        Currently this includes conformers which:

        * contain a trans-COOH configuration. These conformers ... TODO add reason.

        Notes
        -----
        * Problematic conformers are flagged by the
          ``RDKitToolkitWrapper._elf_is_problematic_conformer`` function.

        Returns
        -------
        The conformers to retain.
        """

        valid_conformers = []

        for i, conformer in enumerate(molecule.conformers):

            is_problematic, reason = cls._elf_is_problematic_conformer(
                molecule, conformer
            )

            if is_problematic:
                logger.warning(f"Discarding conformer {i}: {reason}")
            else:
                valid_conformers.append(conformer)

        return valid_conformers

    @classmethod
    def _elf_compute_electrostatic_energy(
        cls, molecule: "Molecule", conformer: unit.Quantity
    ) -> float:
        """Computes the 'electrostatic interaction energy' of a particular conformer
        of a molecule.

        The energy is computed as the sum of ``|q_i * q_j| * r_ij^-1`` over all pairs
        of atoms (i, j) excluding 1-2 and 1-3 terms, where q_i is the partial charge
        of atom i and r_ij the Euclidean distance between atoms i and j.

        Notes
        -----
        * The partial charges will be taken from the molecule directly.

        Parameters
        ----------
        molecule
            The molecule containing the partial charges.
        conformer
            The conformer to compute the energy of. This should be a unit wrapped
            numpy array with shape=(n_atoms, 3) with units compatible with angstroms.

        Returns
        -------
        The electrostatic interaction energy in units of [e^2 / Angstrom].
        """
        if molecule.partial_charges is None:
            raise ValueError("The molecule has no partial charges assigned.")

        # |q_i| column vector; absolute values per the ELF energy definition.
        partial_charges = np.abs(
            molecule.partial_charges.value_in_unit(unit.elementary_charge)
        ).reshape(-1, 1)

        # Build an exclusion list for 1-2 and 1-3 interactions.
        excluded_pairs = {
            *[(bond.atom1_index, bond.atom2_index) for bond in molecule.bonds],
            *[
                (angle[0].molecule_atom_index, angle[-1].molecule_atom_index)
                for angle in molecule.angles
            ],
        }

        # Build the distance matrix between all pairs of atoms.
        coordinates = conformer.value_in_unit(unit.angstrom)

        # ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2, expanded over all pairs.
        distances = np.sqrt(
            np.sum(np.square(coordinates)[:, np.newaxis, :], axis=2)
            - 2 * coordinates.dot(coordinates.T)
            + np.sum(np.square(coordinates), axis=1)
        )

        # Handle edge cases where the squared distance is slightly negative due to
        # precision issues
        np.fill_diagonal(distances, 0.0)

        # 1/r with zeros (self-pairs) left as zero instead of dividing by zero.
        inverse_distances = np.reciprocal(
            distances, out=np.zeros_like(distances), where=~np.isclose(distances, 0.0)
        )

        # Multiply by the charge products.
        charge_products = partial_charges @ partial_charges.T

        for x, y in excluded_pairs:
            charge_products[x, y] = 0.0
            charge_products[y, x] = 0.0

        interaction_energies = inverse_distances * charge_products

        # 0.5 corrects for double counting in the full symmetric matrix.
        return 0.5 * interaction_energies.sum()

    @classmethod
    def _elf_compute_rms_matrix(cls, molecule: "Molecule") -> np.ndarray:
        """Computes the symmetric RMS matrix of all conformers in a molecule taking
        only heavy atoms into account.

        Parameters
        ----------
        molecule
            The molecule containing the conformers.

        Returns
        -------
        The RMS matrix with shape=(n_conformers, n_conformers).
""" from rdkit import Chem from rdkit.Chem import AllChem rdkit_molecule: Chem.RWMol = Chem.RemoveHs(molecule.to_rdkit()) n_conformers = len(molecule.conformers) conformer_ids = [conf.GetId() for conf in rdkit_molecule.GetConformers()] # Compute the RMS matrix making sure to take into account any automorhism (e.g # a phenyl or nitro substituent flipped 180 degrees. rms_matrix = np.zeros((n_conformers, n_conformers)) for i, j in itertools.combinations(conformer_ids, 2): rms_matrix[i, j] = AllChem.GetBestRMS( rdkit_molecule, rdkit_molecule, conformer_ids[i], conformer_ids[j], ) rms_matrix += rms_matrix.T return rms_matrix @classmethod def _elf_select_diverse_conformers( cls, molecule: "Molecule", ranked_conformers: List[unit.Quantity], limit: int, rms_tolerance: unit.Quantity, ) -> List[unit.Quantity]: """Attempt to greedily select a specified number conformers which are maximally diverse. The conformer with the lowest electrostatic energy (the first conformer in the ``ranked_conformers`` list) is always chosen. After that selection proceeds by: a) selecting an un-selected conformer which is the most different from those already selected, and whose RMS compared to each selected conformer is greater than ``rms_tolerance``. Here most different means the conformer which has the largest sum of RMS with the selected conformers. b) repeating a) until either ``limit`` number of conformers have been selected, or there are no more distinct conformers to select from. Notes ----- * As the selection is greedy there is no guarantee that the selected conformers will be the optimal distinct i.e. there may be other selections of conformers which are more distinct. Parameters ---------- molecule The molecule object which matches the conformers to select from. ranked_conformers A list of conformers to select from, ranked by their electrostatic interaction energy (see ``_compute_electrostatic_energy``). limit The maximum number of conformers to select. 
        rms_tolerance
            Conformers whose RMS is within this amount will be treated as identical
            and the duplicate discarded.

        Returns
        -------
        The select list of conformers.
        """

        # Compute the RMS between all pairs of conformers
        molecule = copy.deepcopy(molecule)
        molecule.conformers.clear()

        # Re-attach the conformers in ranked order so matrix row i corresponds
        # to ranked_conformers[i].
        for conformer in ranked_conformers:
            molecule.add_conformer(conformer)

        rms_matrix = cls._elf_compute_rms_matrix(molecule)

        # Apply the greedy selection process.
        closed_list = np.zeros(limit).astype(int)
        closed_mask = np.zeros(rms_matrix.shape[0], dtype=bool)

        # Index 0 (lowest-energy conformer) is always selected.
        n_selected = 1

        for i in range(min(molecule.n_conformers, limit - 1)):

            distances = rms_matrix[closed_list[: i + 1], :].sum(axis=0)

            # Exclude already selected conformers or conformers which are too similar
            # to those already selected.
            closed_mask[
                np.any(
                    rms_matrix[closed_list[: i + 1], :]
                    < rms_tolerance.value_in_unit(unit.angstrom),
                    axis=0,
                )
            ] = True

            if np.all(closed_mask):
                # Stop if there are no more distinct conformers to select from.
                break

            distant_index = np.ma.array(distances, mask=closed_mask).argmax()
            closed_list[i + 1] = distant_index

            n_selected += 1

        return [ranked_conformers[i.item()] for i in closed_list[:n_selected]]

    def apply_elf_conformer_selection(
        self,
        molecule: "Molecule",
        percentage: float = 2.0,
        limit: int = 10,
        rms_tolerance: unit.Quantity = 0.05 * unit.angstrom,
    ):
        """Applies the `ELF method
        <https://docs.eyesopen.com/toolkits/python/quacpactk/molchargetheory.html#elf-conformer-selection>`_
        to select a set of diverse conformers which have minimal electrostatically
        strongly interacting functional groups from a molecules conformers.

        The diverse conformer selection is performed by the
        ``_elf_select_diverse_conformers`` function, which attempts to greedily
        select conformers which are most distinct according to their RMS.
        Warnings
        --------
        * Although this function is inspired by the OpenEye ELF10 method, this
          implementation may yield slightly different conformers due to potential
          differences in this and the OE closed source implementation.

        Notes
        -----
        * The input molecule should have a large set of conformers already
          generated to select the ELF10 conformers from.
        * The selected conformers will be retained in the `molecule.conformers` list
          while unselected conformers will be discarded.
        * Only heavy atoms are included when using the RMS to select diverse
          conformers.

        See Also
        --------
        RDKitToolkitWrapper._elf_select_diverse_conformers

        Parameters
        ----------
        molecule
            The molecule which contains the set of conformers to select from.
        percentage
            The percentage of conformers with the lowest electrostatic interaction
            energies to greedily select from.
        limit
            The maximum number of conformers to select.
        rms_tolerance
            Conformers whose RMS is within this amount will be treated as identical
            and the duplicate discarded.
        """

        # Nothing to do when the molecule carries no conformers.
        if molecule.n_conformers == 0:
            return

        # Copy the input molecule so we can directly perturb it within the method.
        molecule_copy = copy.deepcopy(molecule)

        # Prune any problematic conformers, such as trans-COOH configurations.
        conformers = self._elf_prune_problematic_conformers(molecule_copy)

        if len(conformers) == 0:

            raise ValueError(
                "There were no conformers to select from after discarding conformers "
                "which are known to be problematic when computing ELF partial charges. "
                "Make sure to generate a diverse array of conformers before calling the "
                "`RDKitToolkitWrapper.apply_elf_conformer_selection` method."
            )

        # Generate a set of absolute MMFF94 partial charges for the molecule and use
        # these to compute the electrostatic interaction energy of each conformer.
        self.assign_partial_charges(molecule_copy, "mmff94")

        conformer_energies = [
            (
                self._elf_compute_electrostatic_energy(molecule_copy, conformer),
                conformer,
            )
            for conformer in conformers
        ]

        # Rank the conformer energies and retain `percentage`% with the lowest energies.
        conformer_energies = sorted(conformer_energies, key=lambda x: x[0])
        # Always keep at least one conformer, however small `percentage` is.
        cutoff_index = max(1, int(len(conformer_energies) * percentage / 100.0))

        low_energy_conformers = [
            conformer for _, conformer in conformer_energies[:cutoff_index]
        ]

        # Attempt to greedily select `limit` conformers which are maximally diverse.
        diverse_conformers = self._elf_select_diverse_conformers(
            molecule_copy, low_energy_conformers, limit, rms_tolerance
        )

        molecule._conformers = diverse_conformers

    def from_rdkit(
        self,
        rdmol,
        allow_undefined_stereo=False,
        hydrogens_are_explicit=False,
        _cls=None,
    ):
        """
        Create a Molecule from an RDKit molecule.

        Requires the RDKit to be installed.

        .. warning :: This API is experimental and subject to change.

        Parameters
        ----------
        rdmol : rkit.RDMol
            An RDKit molecule
        allow_undefined_stereo : bool, default=False
            If false, raises an exception if rdmol contains undefined stereochemistry.
        hydrogens_are_explicit : bool, default=False
            If False, RDKit will perform hydrogen addition using Chem.AddHs
        _cls : class
            Molecule constructor

        Returns
        -------
        molecule : openff.toolkit.topology.Molecule
            An OpenFF molecule

        Examples
        --------

        Create a molecule from an RDKit molecule

        >>> from rdkit import Chem
        >>> from openff.toolkit.tests.utils import get_data_file_path
        >>> rdmol = Chem.MolFromMolFile(get_data_file_path('systems/monomers/ethanol.sdf'))

        >>> toolkit_wrapper = RDKitToolkitWrapper()
        >>> molecule = toolkit_wrapper.from_rdkit(rdmol)
        """
        from rdkit import Chem

        if _cls is None:
            from openff.toolkit.topology.molecule import Molecule

            _cls = Molecule

        # Make a copy of the RDKit Mol as we'll need to change it (e.g. assign stereo).
rdmol = Chem.Mol(rdmol) if not hydrogens_are_explicit: rdmol = Chem.AddHs(rdmol, addCoords=True) # Sanitizing the molecule. We handle aromaticity and chirality manually. # This SanitizeMol(...) calls cleanUp, updatePropertyCache, symmetrizeSSSR, # assignRadicals, setConjugation, and setHybridization. Chem.SanitizeMol( rdmol, ( Chem.SANITIZE_ALL ^ Chem.SANITIZE_SETAROMATICITY ^ Chem.SANITIZE_ADJUSTHS ^ Chem.SANITIZE_CLEANUPCHIRALITY ^ Chem.SANITIZE_KEKULIZE ), ) Chem.SetAromaticity(rdmol, Chem.AromaticityModel.AROMATICITY_MDL) # SetAromaticity set aromatic bonds to 1.5, but Molecule.bond_order is an # integer (contrarily to fractional_bond_order) so we need the Kekule order. Chem.Kekulize(rdmol) # Make sure the bond stereo tags are set before checking for # undefined stereo. RDKit can figure out bond stereo from other # information in the Mol object like bond direction properties. # Do not overwrite eventual chiral tags provided by the user. Chem.AssignStereochemistry(rdmol, cleanIt=False) # Check for undefined stereochemistry. self._detect_undefined_stereo( rdmol, raise_warning=allow_undefined_stereo, err_msg_prefix="Unable to make OFFMol from RDMol: ", ) # Create a new OpenFF Molecule offmol = _cls() # If RDMol has a title save it if rdmol.HasProp("_Name"): # raise Exception('{}'.format(rdmol.GetProp('name'))) offmol.name = rdmol.GetProp("_Name") else: offmol.name = "" # Store all properties # TODO: Should there be an API point for storing properties? 
        properties = rdmol.GetPropsAsDict()
        offmol._properties = properties

        # setting chirality in openeye requires using neighbor atoms
        # therefore we can't do it until after the atoms and bonds are all added
        map_atoms = {}  # RDKit atom index -> OpenFF atom index
        map_bonds = {}  # RDKit bond index -> OpenFF bond index
        # if we are loading from a mapped smiles extract the mapping
        atom_mapping = {}  # OpenFF atom index -> atom-map number (0 = unmapped)
        for rda in rdmol.GetAtoms():
            rd_idx = rda.GetIdx()
            # if the molecule was made from a mapped smiles this has been hidden
            # so that it does not affect the sterochemistry tags
            try:
                map_id = int(rda.GetProp("_map_idx"))
            except KeyError:
                map_id = rda.GetAtomMapNum()

            # create a new atom
            # atomic_number = oemol.NewAtom(rda.GetAtomicNum())
            atomic_number = rda.GetAtomicNum()
            formal_charge = rda.GetFormalCharge() * unit.elementary_charge
            is_aromatic = rda.GetIsAromatic()
            if rda.HasProp("_Name"):
                name = rda.GetProp("_Name")
            else:
                # check for PDB names; GetMonomerInfo() is None for non-PDB
                # input, hence the AttributeError fallback
                try:
                    name = rda.GetMonomerInfo().GetName().strip()
                except AttributeError:
                    name = ""

            # If chiral, store the chirality to be set later
            stereochemistry = None
            # tag = rda.GetChiralTag()
            if rda.HasProp("_CIPCode"):
                stereo_code = rda.GetProp("_CIPCode")
                # if tag == Chem.CHI_TETRAHEDRAL_CCW:
                if stereo_code == "R":
                    stereochemistry = "R"
                # if tag == Chem.CHI_TETRAHEDRAL_CW:
                elif stereo_code == "S":
                    stereochemistry = "S"
                else:
                    raise UndefinedStereochemistryError(
                        "In from_rdkit: Expected atom stereochemistry of R or S. "
                        "Got {} instead.".format(stereo_code)
                    )

            atom_index = offmol._add_atom(
                atomic_number,
                formal_charge,
                is_aromatic,
                name=name,
                stereochemistry=stereochemistry,
            )
            map_atoms[rd_idx] = atom_index
            atom_mapping[atom_index] = map_id

        # If we have a full / partial atom map, add it to the molecule.
        # A map value of zero indicates "no mapping", so a mapping that is
        # all zeroes is not recorded at all.
        if {*atom_mapping.values()} != {0}:
            offmol._properties["atom_map"] = {
                idx: map_idx for idx, map_idx in atom_mapping.items() if map_idx != 0
            }

        # Similar to chirality, stereochemistry of bonds in OE is set relative to their neighbors
        for rdb in rdmol.GetBonds():
            rdb_idx = rdb.GetIdx()
            a1 = rdb.GetBeginAtomIdx()
            a2 = rdb.GetEndAtomIdx()

            # Determine bond aromaticity and Kekulized bond order
            is_aromatic = rdb.GetIsAromatic()
            order = rdb.GetBondTypeAsDouble()
            # Convert floating-point bond order to integral bond order
            # (the mol was Kekulized above, so orders are whole numbers)
            order = int(order)

            # create a new bond
            bond_index = offmol._add_bond(
                map_atoms[a1], map_atoms[a2], order, is_aromatic
            )
            map_bonds[rdb_idx] = bond_index

        # Now fill in the cached (structure-dependent) properties. We have to have
        # the 2D structure of the molecule in place first, because each call to
        # add_atom and add_bond invalidates all cached properties
        for rdb in rdmol.GetBonds():
            rdb_idx = rdb.GetIdx()
            offb_idx = map_bonds[rdb_idx]
            offb = offmol.bonds[offb_idx]
            # determine if stereochemistry is needed
            # Note that RDKit has 6 possible values of bond stereo: CIS, TRANS, E, Z, ANY, or NONE
            # The logic below assumes that "ANY" and "NONE" mean the same thing.
stereochemistry = None tag = rdb.GetStereo() if tag == Chem.BondStereo.STEREOZ: stereochemistry = "Z" elif tag == Chem.BondStereo.STEREOE: stereochemistry = "E" elif tag == Chem.BondStereo.STEREOTRANS or tag == Chem.BondStereo.STEREOCIS: raise ValueError( "Expected RDKit bond stereochemistry of E or Z, got {} instead".format( tag ) ) offb._stereochemistry = stereochemistry fractional_bond_order = None if rdb.HasProp("fractional_bond_order"): fractional_bond_order = rdb.GetDoubleProp("fractional_bond_order") offb.fractional_bond_order = fractional_bond_order # TODO: Save conformer(s), if present # If the rdmol has a conformer, store its coordinates if len(rdmol.GetConformers()) != 0: for conf in rdmol.GetConformers(): n_atoms = offmol.n_atoms # TODO: Will this always be angstrom when loading from RDKit? positions = unit.Quantity(np.zeros((n_atoms, 3)), unit.angstrom) for rd_idx, off_idx in map_atoms.items(): atom_coords = conf.GetPositions()[rd_idx, :] * unit.angstrom positions[off_idx, :] = atom_coords offmol._add_conformer(positions) partial_charges = unit.Quantity( np.zeros(shape=offmol.n_atoms, dtype=np.float64), unit=unit.elementary_charge, ) any_atom_has_partial_charge = False for rd_idx, rd_atom in enumerate(rdmol.GetAtoms()): off_idx = map_atoms[rd_idx] if rd_atom.HasProp("PartialCharge"): charge = rd_atom.GetDoubleProp("PartialCharge") * unit.elementary_charge partial_charges[off_idx] = charge any_atom_has_partial_charge = True else: # If some other atoms had partial charges but this one doesn't, raise an Exception if any_atom_has_partial_charge: raise ValueError( "Some atoms in rdmol have partial charges, but others do not." ) if any_atom_has_partial_charge: offmol.partial_charges = partial_charges else: offmol.partial_charges = None return offmol @classmethod def to_rdkit(cls, molecule, aromaticity_model=DEFAULT_AROMATICITY_MODEL): """ Create an RDKit molecule Requires the RDKit to be installed. .. 
warning :: This API is experimental and subject to change. Parameters ---------- aromaticity_model : str, optional, default=DEFAULT_AROMATICITY_MODEL The aromaticity model to use Returns ------- rdmol : rkit.RDMol An RDKit molecule Examples -------- Convert a molecule to RDKit >>> from openff.toolkit.topology import Molecule >>> ethanol = Molecule.from_smiles('CCO') >>> rdmol = ethanol.to_rdkit() """ from rdkit import Chem, Geometry # Create an editable RDKit molecule rdmol = Chem.RWMol() # Set name # TODO: What is the best practice for how this should be named? if not (molecule.name is None): rdmol.SetProp("_Name", molecule.name) # TODO: Set other properties for name, value in molecule.properties.items(): if type(value) == str: rdmol.SetProp(name, value) elif type(value) == int: rdmol.SetIntProp(name, value) elif type(value) == float: rdmol.SetDoubleProp(name, value) elif type(value) == bool: rdmol.SetBoolProp(name, value) else: # Shove everything else into a string rdmol.SetProp(name, str(value)) _bondtypes = { 1: Chem.BondType.SINGLE, 1.5: Chem.BondType.AROMATIC, 2: Chem.BondType.DOUBLE, 3: Chem.BondType.TRIPLE, 4: Chem.BondType.QUADRUPLE, 5: Chem.BondType.QUINTUPLE, 6: Chem.BondType.HEXTUPLE, 7: Chem.BondType.ONEANDAHALF, } for index, atom in enumerate(molecule.atoms): rdatom = Chem.Atom(atom.atomic_number) rdatom.SetFormalCharge( atom.formal_charge.value_in_unit(unit.elementary_charge) ) rdatom.SetIsAromatic(atom.is_aromatic) rdatom.SetProp("_Name", atom.name) ## Stereo handling code moved to after bonds are added if atom.stereochemistry == "S": rdatom.SetChiralTag(Chem.CHI_TETRAHEDRAL_CW) elif atom.stereochemistry == "R": rdatom.SetChiralTag(Chem.CHI_TETRAHEDRAL_CCW) rd_index = rdmol.AddAtom(rdatom) # Let's make sure al the atom indices in the two molecules # are the same, otherwise we need to create an atom map. 
assert index == atom.molecule_atom_index assert index == rd_index for bond in molecule.bonds: atom_indices = ( bond.atom1.molecule_atom_index, bond.atom2.molecule_atom_index, ) rdmol.AddBond(*atom_indices) rdbond = rdmol.GetBondBetweenAtoms(*atom_indices) if not (bond.fractional_bond_order is None): rdbond.SetDoubleProp( "fractional_bond_order", bond.fractional_bond_order ) # Assign bond type, which is based on order unless it is aromatic if bond.is_aromatic: rdbond.SetBondType(_bondtypes[1.5]) rdbond.SetIsAromatic(True) else: rdbond.SetBondType(_bondtypes[bond.bond_order]) rdbond.SetIsAromatic(False) Chem.SanitizeMol( rdmol, Chem.SANITIZE_ALL ^ Chem.SANITIZE_ADJUSTHS ^ Chem.SANITIZE_SETAROMATICITY, ) # Fix for aromaticity being lost if aromaticity_model == "OEAroModel_MDL": Chem.SetAromaticity(rdmol, Chem.AromaticityModel.AROMATICITY_MDL) else: raise ValueError(f"Aromaticity model {aromaticity_model} not recognized") # Assign atom stereochemsitry and collect atoms for which RDKit # can't figure out chirality. The _CIPCode property of these atoms # will be forcefully set to the stereo we want (see #196). undefined_stereo_atoms = {} for index, atom in enumerate(molecule.atoms): rdatom = rdmol.GetAtomWithIdx(index) # Skip non-chiral atoms. if atom.stereochemistry is None: continue # Let's randomly assign this atom's (local) stereo to CW # and check if this causes the (global) stereo to be set # to the desired one (S or R). rdatom.SetChiralTag(Chem.CHI_TETRAHEDRAL_CW) # We need to do force and cleanIt to recalculate CIP stereo. Chem.AssignStereochemistry(rdmol, force=True, cleanIt=True) # If our random initial assignment worked, then we're set. if ( rdatom.HasProp("_CIPCode") and rdatom.GetProp("_CIPCode") == atom.stereochemistry ): continue # Otherwise, set it to CCW. rdatom.SetChiralTag(Chem.CHI_TETRAHEDRAL_CCW) # We need to do force and cleanIt to recalculate CIP stereo. 
Chem.AssignStereochemistry(rdmol, force=True, cleanIt=True) # Hopefully this worked, otherwise something's wrong if ( rdatom.HasProp("_CIPCode") and rdatom.GetProp("_CIPCode") == atom.stereochemistry ): continue # Keep track of undefined stereo atoms. We'll force stereochemistry # at the end to avoid the next AssignStereochemistry to overwrite. if not rdatom.HasProp("_CIPCode"): undefined_stereo_atoms[rdatom] = atom.stereochemistry continue # Something is wrong. err_msg = ( "Unknown atom stereochemistry encountered in to_rdkit. " "Desired stereochemistry: {}. Set stereochemistry {}".format( atom.stereochemistry, rdatom.GetProp("_CIPCode") ) ) raise RuntimeError(err_msg) # Copy bond stereo info from molecule to rdmol. cls._assign_rdmol_bonds_stereo(molecule, rdmol) # Set coordinates if we have them if molecule._conformers: for conformer in molecule._conformers: rdmol_conformer = Chem.Conformer() for atom_idx in range(molecule.n_atoms): x, y, z = conformer[atom_idx, :].value_in_unit(unit.angstrom) rdmol_conformer.SetAtomPosition(atom_idx, Geometry.Point3D(x, y, z)) rdmol.AddConformer(rdmol_conformer, assignId=True) # Retain charges, if present if not (molecule._partial_charges is None): rdk_indexed_charges = np.zeros(shape=molecule.n_atoms, dtype=float) for atom_idx, charge in enumerate(molecule._partial_charges): charge_unitless = charge.value_in_unit(unit.elementary_charge) rdk_indexed_charges[atom_idx] = charge_unitless for atom_idx, rdk_atom in enumerate(rdmol.GetAtoms()): rdk_atom.SetDoubleProp("PartialCharge", rdk_indexed_charges[atom_idx]) # Note: We could put this outside the "if" statement, which would result in all partial charges in the # resulting file being set to "n/a" if they weren't set in the Open Force Field Toolkit ``Molecule`` Chem.CreateAtomDoublePropertyList(rdmol, "PartialCharge") # Cleanup the rdmol rdmol.UpdatePropertyCache(strict=False) Chem.GetSSSR(rdmol) # Forcefully assign stereo information on the atoms that RDKit # can't figure out. 
This must be done last as calling AssignStereochemistry # again will delete these properties (see #196). for rdatom, stereochemistry in undefined_stereo_atoms.items(): rdatom.SetProp("_CIPCode", stereochemistry) # Return non-editable version return Chem.Mol(rdmol) def to_inchi(self, molecule, fixed_hydrogens=False): """ Create an InChI string for the molecule using the RDKit Toolkit. InChI is a standardised representation that does not capture tautomers unless specified using the fixed hydrogen layer. For information on InChi see here https://iupac.org/who-we-are/divisions/division-details/inchi/ Parameters ---------- molecule : An openff.toolkit.topology.Molecule The molecule to convert into a SMILES. fixed_hydrogens: bool, default=False If a fixed hydrogen layer should be added to the InChI, if `True` this will produce a non standard specific InChI string of the molecule. Returns -------- inchi: str The InChI string of the molecule. """ from rdkit import Chem rdmol = self.to_rdkit(molecule) if fixed_hydrogens: inchi = Chem.MolToInchi(rdmol, options="-FixedH") else: inchi = Chem.MolToInchi(rdmol) return inchi def to_inchikey(self, molecule, fixed_hydrogens=False): """ Create an InChIKey for the molecule using the RDKit Toolkit. InChIKey is a standardised representation that does not capture tautomers unless specified using the fixed hydrogen layer. For information on InChi see here https://iupac.org/who-we-are/divisions/division-details/inchi/ Parameters ---------- molecule : An openff.toolkit.topology.Molecule The molecule to convert into a SMILES. fixed_hydrogens: bool, default=False If a fixed hydrogen layer should be added to the InChI, if `True` this will produce a non standard specific InChI string of the molecule. Returns -------- inchi_key: str The InChIKey representation of the molecule. 
""" from rdkit import Chem rdmol = self.to_rdkit(molecule) if fixed_hydrogens: inchi_key = Chem.MolToInchiKey(rdmol, options="-FixedH") else: inchi_key = Chem.MolToInchiKey(rdmol) return inchi_key def get_tagged_smarts_connectivity(self, smarts): """ Returns a tuple of tuples indicating connectivity between tagged atoms in a SMARTS string. Does not return bond order. Parameters ---------- smarts : str The tagged SMARTS to analyze Returns ------- unique_tags : tuple of int A sorted tuple of all unique tagged atom map indices. tagged_atom_connectivity : tuple of tuples of int, shape n_tagged_bonds x 2 A tuple of tuples, where each inner tuple is a pair of tagged atoms (tag_idx_1, tag_idx_2) which are bonded. The inner tuples are ordered smallest-to-largest, and the tuple of tuples is ordered lexically. So the return value for an improper torsion would be ((1, 2), (2, 3), (2, 4)). Raises ------ SMIRKSParsingError If RDKit was unable to parse the provided smirks/tagged smarts """ from rdkit import Chem from openff.toolkit.typing.chemistry import SMIRKSParsingError ss = Chem.MolFromSmarts(smarts) if ss is None: raise SMIRKSParsingError(f"RDKit was unable to parse SMIRKS {smarts}") unique_tags = set() connections = set() for at1 in ss.GetAtoms(): if at1.GetAtomMapNum() == 0: continue unique_tags.add(at1.GetAtomMapNum()) for at2 in at1.GetNeighbors(): if at2.GetAtomMapNum() == 0: continue cxn_to_add = sorted([at1.GetAtomMapNum(), at2.GetAtomMapNum()]) connections.add(tuple(cxn_to_add)) connections = tuple(sorted(list(connections))) unique_tags = tuple(sorted(list(unique_tags))) return unique_tags, connections @staticmethod def _find_smarts_matches(rdmol, smirks, aromaticity_model="OEAroModel_MDL"): """Find all sets of atoms in the provided RDKit molecule that match the provided SMARTS string. Parameters ---------- rdmol : rdkit.Chem.Mol rdmol to process with the SMIRKS in order to find matches smarts : str SMARTS string with any number of sequentially tagged atoms. 
If there are N tagged atoms numbered 1..N, the resulting matches will be N-tuples of atoms that match the corresponding tagged atoms. aromaticity_model : str, optional, default='OEAroModel_MDL' OpenEye aromaticity model designation as a string, such as ``OEAroModel_MDL``. Molecule is prepared with this aromaticity model prior to querying. Returns ------- matches : list of tuples of atoms indices within the ``rdmol`` matches[index] is an N-tuple of atom numbers from the ``rdmol`` Matches are returned in no guaranteed order. # TODO: What is returned if no matches are found? An empty list, or None? # TODO: Ensure that SMARTS numbers 1, 2, 3... are rendered into order of returnd matches indexed by 0, 1, 2... .. notes :: * Raises ``ValueError`` if ``smarts`` query is malformed """ from rdkit import Chem # Make a copy of the molecule rdmol = Chem.Mol(rdmol) # Use designated aromaticity model if aromaticity_model == "OEAroModel_MDL": Chem.SanitizeMol(rdmol, Chem.SANITIZE_ALL ^ Chem.SANITIZE_SETAROMATICITY) Chem.SetAromaticity(rdmol, Chem.AromaticityModel.AROMATICITY_MDL) else: # Only the OEAroModel_MDL is supported for now raise ValueError("Unknown aromaticity model: {}".aromaticity_models) # Set up query. 
qmol = Chem.MolFromSmarts(smirks) # cannot catch the error if qmol is None: raise ValueError( 'RDKit could not parse the SMIRKS string "{}"'.format(smirks) ) # Create atom mapping for query molecule idx_map = dict() for atom in qmol.GetAtoms(): smirks_index = atom.GetAtomMapNum() if smirks_index != 0: idx_map[smirks_index - 1] = atom.GetIdx() map_list = [idx_map[x] for x in sorted(idx_map)] # Perform matching matches = list() # choose the largest unsigned int without overflow # since the C++ signature is a uint max_matches = np.iinfo(np.uintc).max for match in rdmol.GetSubstructMatches( qmol, uniquify=False, maxMatches=max_matches, useChirality=True ): mas = [match[x] for x in map_list] matches.append(tuple(mas)) return matches def find_smarts_matches(self, molecule, smarts, aromaticity_model="OEAroModel_MDL"): """ Find all SMARTS matches for the specified molecule, using the specified aromaticity model. .. warning :: This API is experimental and subject to change. Parameters ---------- molecule : openff.toolkit.topology.Molecule The molecule for which all specified SMARTS matches are to be located smarts : str SMARTS string with optional SMIRKS-style atom tagging aromaticity_model : str, optional, default='OEAroModel_MDL' Molecule is prepared with this aromaticity model prior to querying. .. note :: Currently, the only supported ``aromaticity_model`` is ``OEAroModel_MDL`` """ rdmol = self.to_rdkit(molecule, aromaticity_model=aromaticity_model) return self._find_smarts_matches( rdmol, smarts, aromaticity_model="OEAroModel_MDL" ) # -------------------------------- # Stereochemistry RDKit utilities. # -------------------------------- def find_rings(self, molecule): """Find the rings in a given molecule. .. note :: For systems containing some special cases of connected rings, this function may not be well-behaved and may report a different number rings than expected. Some problematic cases include networks of many (5+) rings or bicyclic moieties (i.e. norbornane). 
Parameters ---------- molecule : openff.toolkit.topology.Molecule The molecule for which rings are to be found Returns ------- rings : tuple of tuples of atom indices Nested tuples, each containing the indices of atoms in each ring """ rdmol = molecule.to_rdkit() ring_info = rdmol.GetRingInfo() rings = ring_info.AtomRings() return rings @staticmethod def _find_undefined_stereo_atoms(rdmol, assign_stereo=False): """Find the chiral atoms with undefined stereochemsitry in the RDMol. Parameters ---------- rdmol : rdkit.RDMol The RDKit molecule. assign_stereo : bool, optional, default=False As a side effect, this function calls ``Chem.AssignStereochemistry()`` so by default we work on a molecule copy. Set this to ``True`` to avoid making a copy and assigning the stereochemistry to the Mol object. Returns ------- undefined_atom_indices : List[int] A list of atom indices that are chiral centers with undefined stereochemistry. See Also -------- rdkit.Chem.FindMolChiralCenters """ from rdkit import Chem if not assign_stereo: # Avoid modifying the original molecule. rdmol = copy.deepcopy(rdmol) # Flag possible chiral centers with the "_ChiralityPossible". Chem.AssignStereochemistry(rdmol, force=True, flagPossibleStereoCenters=True) # Find all atoms with undefined stereo. undefined_atom_indices = [] for atom_idx, atom in enumerate(rdmol.GetAtoms()): if atom.GetChiralTag() == Chem.ChiralType.CHI_UNSPECIFIED and atom.HasProp( "_ChiralityPossible" ): undefined_atom_indices.append(atom_idx) return undefined_atom_indices @staticmethod def _find_undefined_stereo_bonds(rdmol): """Find the chiral atoms with undefined stereochemsitry in the RDMol. Parameters ---------- rdmol : rdkit.RDMol The RDKit molecule. Returns ------- undefined_bond_indices : List[int] A list of bond indices with undefined stereochemistry. 
See Also -------- Chem.EnumerateStereoisomers._getFlippers Links ----- https://github.com/rdkit/rdkit/blob/master/Code/GraphMol/Chirality.cpp#L1509-L1515 This comment in FindPotentialStereoBonds mention that the method ignores ring bonds. https://github.com/DrrDom/rdk/blob/master/gen_stereo_rdkit3.py The function get_unspec_double_bonds() in this module looks like may solve the problem with the rings. """ from rdkit import Chem # Copy the molecule to avoid side effects. Chem.FindPotentialStereoBonds # assign Bond.STEREOANY to unspecific bond, which make subsequent calls # of Chem.AssignStereochemistry ignore the bond even if there are # ENDDOWNRIGHT/ENDUPRIGHT bond direction indications. rdmol_copy = copy.deepcopy(rdmol) # Clear any previous assignments on the bonds, since FindPotentialStereo may not overwrite it for bond in rdmol_copy.GetBonds(): bond.SetStereo(Chem.BondStereo.STEREONONE) # This function assigns Bond.GetStereo() == Bond.STEREOANY to bonds with # possible stereochemistry. Chem.FindPotentialStereoBonds(rdmol_copy, cleanIt=True) # Any TRULY stereogenic bonds in the molecule are now marked as STEREOANY in rdmol_copy. # Iterate through all the bonds, and for the ones where rdmol_copy is marked as STEREOANY, # ensure that they are cis/trans/E/Z (tested here be ensuring that they're NOT either # # of the other possible types (NONE or ANY)) undefined_bond_indices = [] for bond_idx, (orig_bond, repercieved_bond) in enumerate( zip(rdmol.GetBonds(), rdmol_copy.GetBonds()) ): # print(repercieved_bond.GetStereo(), orig_bond.GetStereo()) if (repercieved_bond.GetStereo() == Chem.BondStereo.STEREOANY) and ( (orig_bond.GetStereo() == Chem.BondStereo.STEREOANY) or (orig_bond.GetStereo() == Chem.BondStereo.STEREONONE) ): undefined_bond_indices.append(bond_idx) return undefined_bond_indices @classmethod def _detect_undefined_stereo(cls, rdmol, err_msg_prefix="", raise_warning=False): """Raise UndefinedStereochemistryError if the RDMol has undefined stereochemistry. 
Parameters ---------- rdmol : rdkit.Chem.Mol The RDKit molecule. err_msg_prefix : str, optional A string to prepend to the error/warning message. raise_warning : bool, optional, default=False If True, a warning is issued instead of an exception. Raises ------ UndefinedStereochemistryError If the RDMol has undefined atom or bond stereochemistry. """ # Find undefined atom/bond stereochemistry. undefined_atom_indices = cls._find_undefined_stereo_atoms(rdmol) undefined_bond_indices = cls._find_undefined_stereo_bonds(rdmol) # Build error message. if len(undefined_atom_indices) == 0 and len(undefined_bond_indices) == 0: msg = None else: msg = err_msg_prefix + "RDMol has unspecified stereochemistry. " # The "_Name" property is not always assigned. if rdmol.HasProp("_Name"): msg += "RDMol name: " + rdmol.GetProp("_Name") # Details about undefined atoms. if len(undefined_atom_indices) > 0: msg += "Undefined chiral centers are:\n" for undefined_atom_idx in undefined_atom_indices: msg += " - Atom {symbol} (index {index})\n".format( symbol=rdmol.GetAtomWithIdx(undefined_atom_idx).GetSymbol(), index=undefined_atom_idx, ) # Details about undefined bond. if len(undefined_bond_indices) > 0: msg += "Bonds with undefined stereochemistry are:\n" for undefined_bond_idx in undefined_bond_indices: bond = rdmol.GetBondWithIdx(undefined_bond_idx) atom1, atom2 = bond.GetBeginAtom(), bond.GetEndAtom() msg += " - Bond {bindex} (atoms {aindex1}-{aindex2} of element ({symbol1}-{symbol2})\n".format( bindex=undefined_bond_idx, aindex1=atom1.GetIdx(), aindex2=atom2.GetIdx(), symbol1=atom1.GetSymbol(), symbol2=atom2.GetSymbol(), ) if msg is not None: if raise_warning: msg = "Warning (not error because allow_undefined_stereo=True): " + msg logger.warning(msg) else: msg = "Unable to make OFFMol from RDMol: " + msg raise UndefinedStereochemistryError(msg) @staticmethod def _flip_rdbond_direction(rdbond, paired_rdbonds): """Flip the rdbond and all those paired to it. 
Parameters ---------- rdbond : rdkit.Chem.Bond The Bond whose direction needs to be flipped. paired_rdbonds : Dict[Tuple[int], List[rdkit.Chem.Bond]] Maps bond atom indices that are assigned a bond direction to the bonds on the other side of the double bond. """ from rdkit import Chem # The function assumes that all bonds are either up or down. supported_directions = {Chem.BondDir.ENDUPRIGHT, Chem.BondDir.ENDDOWNRIGHT} def _flip(b, paired, flipped, ignored): # The function assumes that all bonds are either up or down. assert b.GetBondDir() in supported_directions bond_atom_indices = (b.GetBeginAtomIdx(), b.GetEndAtomIdx()) # Check that we haven't flipped this bond already. if bond_atom_indices in flipped: # This should never happen. raise RuntimeError("Cannot flip the bond direction consistently.") # Flip the bond. if b.GetBondDir() == Chem.BondDir.ENDUPRIGHT: b.SetBondDir(Chem.BondDir.ENDDOWNRIGHT) else: b.SetBondDir(Chem.BondDir.ENDUPRIGHT) flipped.add(bond_atom_indices) # Flip all the paired bonds as well (if there are any). if bond_atom_indices in paired: for paired_rdbond in paired[bond_atom_indices]: # Don't flip the bond that was flipped in the upper-level recursion. if ( paired_rdbond.GetBeginAtomIdx(), paired_rdbond.GetEndAtomIdx(), ) != ignored: # Don't flip this bond in the next recursion. _flip(paired_rdbond, paired, flipped, ignored=bond_atom_indices) _flip(rdbond, paired_rdbonds, flipped=set(), ignored=None) @classmethod def _assign_rdmol_bonds_stereo(cls, offmol, rdmol): """Copy the info about bonds stereochemistry from the OFF Molecule to RDKit Mol.""" from rdkit import Chem # Map the bonds indices that are assigned bond direction # to the bond on the other side of the double bond. # (atom_index1, atom_index2) -> List[rdkit.Chem.Bond] paired_bonds = {} for bond in offmol.bonds: # No need to do anything with bonds without stereochemistry. if not bond.stereochemistry: continue # Isolate stereo RDKit bond object. 
rdbond_atom_indices = ( bond.atom1.molecule_atom_index, bond.atom2.molecule_atom_index, ) stereo_rdbond = rdmol.GetBondBetweenAtoms(*rdbond_atom_indices) # Collect all neighboring rdbonds of atom1 and atom2. neighbor_rdbonds1 = [ rdmol.GetBondBetweenAtoms( n.molecule_atom_index, bond.atom1.molecule_atom_index ) for n in bond.atom1.bonded_atoms if n != bond.atom2 ] neighbor_rdbonds2 = [ rdmol.GetBondBetweenAtoms( bond.atom2.molecule_atom_index, n.molecule_atom_index ) for n in bond.atom2.bonded_atoms if n != bond.atom1 ] # Select only 1 neighbor bond per atom out of the two. neighbor_rdbonds = [] for i, rdbonds in enumerate([neighbor_rdbonds1, neighbor_rdbonds2]): # If there are no neighbors for which we have already # assigned the bond direction, just pick the first one. neighbor_rdbonds.append(rdbonds[0]) # Otherwise, pick neighbor that was already assigned to # avoid inconsistencies and keep the tree non-cyclic. for rdb in rdbonds: if (rdb.GetBeginAtomIdx(), rdb.GetBeginAtomIdx()) in paired_bonds: neighbor_rdbonds[i] = rdb break # Assign a random direction to the bonds that were not already assigned # keeping track of which bond would be best to flip later (i.e. does that # are not already determining the stereochemistry of another double bond). flipped_rdbond = neighbor_rdbonds[0] for rdb in neighbor_rdbonds: if (rdb.GetBeginAtomIdx(), rdb.GetEndAtomIdx()) not in paired_bonds: rdb.SetBondDir(Chem.BondDir.ENDUPRIGHT) # Set this bond as a possible bond to flip. flipped_rdbond = rdb Chem.AssignStereochemistry(rdmol, cleanIt=True, force=True) # Verify that the current directions give us the desired stereochemistries. assert bond.stereochemistry in {"E", "Z"} if bond.stereochemistry == "E": desired_rdk_stereo_code = Chem.rdchem.BondStereo.STEREOE else: desired_rdk_stereo_code = Chem.rdchem.BondStereo.STEREOZ # If that doesn't work, flip the direction of one bond preferring # those that are not already determining the stereo of another bond. 
if stereo_rdbond.GetStereo() != desired_rdk_stereo_code: cls._flip_rdbond_direction(flipped_rdbond, paired_bonds) Chem.AssignStereochemistry(rdmol, cleanIt=True, force=True) # The stereo should be set correctly here. assert stereo_rdbond.GetStereo() == desired_rdk_stereo_code # Update paired bonds map. neighbor_bond_indices = [ (rdb.GetBeginAtomIdx(), rdb.GetEndAtomIdx()) for rdb in neighbor_rdbonds ] for i, bond_indices in enumerate(neighbor_bond_indices): try: paired_bonds[bond_indices].append(neighbor_rdbonds[1 - i]) except KeyError: paired_bonds[bond_indices] = [neighbor_rdbonds[1 - i]] class AmberToolsToolkitWrapper(ToolkitWrapper): """ AmberTools toolkit wrapper .. warning :: This API is experimental and subject to change. """ _toolkit_name = "AmberTools" _toolkit_installation_instructions = ( "The AmberTools toolkit (free and open source) can be found at " "https://anaconda.org/conda-forge/ambertools" ) def __init__(self): super().__init__() self._toolkit_file_read_formats = [] self._toolkit_file_write_formats = [] if not self.is_available(): raise ToolkitUnavailableException( f"The required toolkit {self._toolkit_name} is not " f"available. {self._toolkit_installation_instructions}" ) # TODO: More reliable way to extract AmberTools version out = subprocess.check_output(["antechamber", "-L"]) ambertools_version = out.decode("utf-8").split("\n")[1].split()[3].strip(":") self._toolkit_version = ambertools_version # TODO: Find AMBERHOME or executable home, checking miniconda if needed # Store an instance of an RDKitToolkitWrapper for file I/O self._rdkit_toolkit_wrapper = RDKitToolkitWrapper() @staticmethod def is_available(): """ Check whether the AmberTools toolkit is installed Returns ------- is_installed : bool True if AmberTools is installed, False otherwise. """ # TODO: Check all tools needed # TODO: How should we implement find_executable? 
def assign_partial_charges(
    self,
    molecule,
    partial_charge_method=None,
    use_conformers=None,
    strict_n_conformers=False,
    _cls=None,
):
    """
    Compute partial charges with AmberTools using antechamber/sqm, and assign
    the new values to the partial_charges attribute.

    .. warning :: This API is experimental and subject to change.

    .. todo ::

       * Do we want to also allow ESP/RESP charges?

    Parameters
    ----------
    molecule : openff.toolkit.topology.Molecule
        Molecule for which partial charges are to be computed
    partial_charge_method : str, optional, default=None
        The charge model to use. One of ['gasteiger', 'am1bcc', 'am1-mulliken'].
        If None, 'am1-mulliken' will be used.
    use_conformers : iterable of simtk.unit.Quantity-wrapped numpy arrays, each
        with shape (n_atoms, 3) and dimension of distance. Optional, default = None
        List of (n_atoms x 3) simtk.unit.Quantities to use for partial charge
        calculation. If None, an appropriate number of conformers will be generated.
    strict_n_conformers : bool, default=False
        Whether to raise an exception if an invalid number of conformers is
        provided for the given charge method. If this is False and an invalid
        number of conformers is found, a warning will be raised.
    _cls : class
        Molecule constructor

    Raises
    ------
    ChargeMethodUnavailableError
        if the requested charge method can not be handled by this toolkit

    ChargeCalculationError
        if the charge method is supported by this toolkit, but fails
    """

    import os
    import subprocess

    from openff.toolkit.topology import Molecule

    if partial_charge_method is None:
        partial_charge_method = "am1-mulliken"
    else:
        # Standardize method name for string comparisons
        partial_charge_method = partial_charge_method.lower()

    # Maps each supported method to its antechamber "-c" keyword and the number
    # of conformers it needs: min/max are enforced below, rec drives generation.
    SUPPORTED_CHARGE_METHODS = {
        "am1bcc": {
            "antechamber_keyword": "bcc",
            "min_confs": 1,
            "max_confs": 1,
            "rec_confs": 1,
        },
        "am1-mulliken": {
            "antechamber_keyword": "mul",
            "min_confs": 1,
            "max_confs": 1,
            "rec_confs": 1,
        },
        "gasteiger": {
            "antechamber_keyword": "gas",
            "min_confs": 0,
            "max_confs": 0,
            "rec_confs": 0,
        },
    }

    if partial_charge_method not in SUPPORTED_CHARGE_METHODS:
        raise ChargeMethodUnavailableError(
            f"partial_charge_method '{partial_charge_method}' is not available from AmberToolsToolkitWrapper. "
            f"Available charge methods are {list(SUPPORTED_CHARGE_METHODS.keys())} "
        )

    charge_method = SUPPORTED_CHARGE_METHODS[partial_charge_method]

    if _cls is None:
        from openff.toolkit.topology.molecule import Molecule

        _cls = Molecule

    # Make a temporary copy of the molecule, since we'll be messing with its conformers
    mol_copy = _cls(molecule)

    if use_conformers is None:
        if charge_method["rec_confs"] == 0:
            # Conformer-independent method (gasteiger): strip conformers entirely.
            mol_copy._conformers = None
        else:
            mol_copy.generate_conformers(
                n_conformers=charge_method["rec_confs"],
                rms_cutoff=0.25 * unit.angstrom,
                toolkit_registry=RDKitToolkitWrapper(),
            )
        # TODO: What's a "best practice" RMS cutoff to use here?
    else:
        # Caller-supplied conformers replace any existing ones on the copy.
        mol_copy._conformers = None
        for conformer in use_conformers:
            mol_copy._add_conformer(conformer)
    self._check_n_conformers(
        mol_copy,
        partial_charge_method=partial_charge_method,
        min_confs=charge_method["min_confs"],
        max_confs=charge_method["max_confs"],
        strict_n_conformers=strict_n_conformers,
    )

    # Find the path to antechamber
    # TODO: How should we implement find_executable?
    ANTECHAMBER_PATH = find_executable("antechamber")
    if ANTECHAMBER_PATH is None:
        raise AntechamberNotFoundError(
            "Antechamber not found, cannot run charge_mol()"
        )

    # Compute charges in a scratch directory so antechamber's intermediate
    # files never pollute the caller's working directory.
    with tempfile.TemporaryDirectory() as tmpdir:
        with temporary_cd(tmpdir):
            net_charge = mol_copy.total_charge / unit.elementary_charge
            # Write out molecule in SDF format
            ## TODO: How should we handle multiple conformers?
            self._rdkit_toolkit_wrapper.to_file(
                mol_copy, "molecule.sdf", file_format="sdf"
            )
            # Compute desired charges
            # TODO: Add error handling if antechamber chokes
            short_charge_method = charge_method["antechamber_keyword"]
            subprocess.check_output(
                [
                    "antechamber",
                    "-i",
                    "molecule.sdf",
                    "-fi",
                    "sdf",
                    "-o",
                    "charged.mol2",
                    "-fo",
                    "mol2",
                    "-pf",
                    "yes",
                    "-dr",
                    "n",
                    "-c",
                    short_charge_method,
                    "-nc",
                    str(net_charge),
                ]
            )
            # Second antechamber pass ("-c wc") only dumps the charges from
            # charged.mol2 into the plain-text file charges.txt.
            subprocess.check_output(
                [
                    "antechamber",
                    "-dr",
                    "n",
                    "-i",
                    "charged.mol2",
                    "-fi",
                    "mol2",
                    "-o",
                    "charges2.mol2",
                    "-fo",
                    "mol2",
                    "-c",
                    "wc",
                    "-cf",
                    "charges.txt",
                    "-pf",
                    "yes",
                ]
            )
            # Check to ensure charges were actually produced
            if not os.path.exists("charges.txt"):
                # TODO: copy files into local directory to aid debugging?
                raise ChargeCalculationError(
                    "Antechamber/sqm partial charge calculation failed on "
                    "molecule {} (SMILES {})".format(
                        molecule.name, molecule.to_smiles()
                    )
                )
            # Read the charges (whitespace-separated floats, one per atom)
            with open("charges.txt", "r") as infile:
                contents = infile.read()
            text_charges = contents.split()
            charges = np.zeros([molecule.n_atoms], np.float64)
            for index, token in enumerate(text_charges):
                charges[index] = float(token)
    # TODO: Ensure that the atoms in charged.mol2 are in the same order as in molecule.sdf
    charges = unit.Quantity(charges, unit.elementary_charge)
    molecule.partial_charges = charges
raise ChargeCalculationError( "Antechamber/sqm partial charge calculation failed on " "molecule {} (SMILES {})".format( molecule.name, molecule.to_smiles() ) ) # Read the charges with open("charges.txt", "r") as infile: contents = infile.read() text_charges = contents.split() charges = np.zeros([molecule.n_atoms], np.float64) for index, token in enumerate(text_charges): charges[index] = float(token) # TODO: Ensure that the atoms in charged.mol2 are in the same order as in molecule.sdf charges = unit.Quantity(charges, unit.elementary_charge) molecule.partial_charges = charges def compute_partial_charges_am1bcc( self, molecule, use_conformers=None, strict_n_conformers=False ): """ Compute partial charges with AmberTools using antechamber/sqm. This will calculate AM1-BCC charges on the first conformer only. .. warning :: This API is experimental and subject to change. Parameters ---------- molecule : Molecule Molecule for which partial charges are to be computed use_conformers : iterable of simtk.unit.Quantity-wrapped numpy arrays, each with shape (n_atoms, 3) and dimension of distance. Optional, default = None Coordinates to use for partial charge calculation. If None, an appropriate number of conformers will be generated. strict_n_conformers : bool, default=False Whether to raise an exception if an invalid number of conformers is provided. If this is False and an invalid number of conformers is found, a warning will be raised instead of an Exception. Returns ------- charges : numpy.array of shape (natoms) of type float The partial charges """ import warnings warnings.warn( "compute_partial_charges_am1bcc will be deprecated in an upcoming release. 
" "Use assign_partial_charges(partial_charge_method='am1bcc') instead.", DeprecationWarning, ) self.assign_partial_charges( molecule, partial_charge_method="AM1BCC", use_conformers=use_conformers, strict_n_conformers=strict_n_conformers, ) return molecule.partial_charges def _modify_sqm_in_to_request_bond_orders(self, file_path): """ Modify a sqm.in file produced by antechamber to include the "printbondorders=1" directive in the header. This method will overwrite the original file. Parameters ---------- file_path : str The path to sqm.in """ data = open(file_path).read() # Original sqm.in file headerlooks like: # Run semi-empirical minimization # &qmmm # qm_theory='AM1', grms_tol=0.0005, # scfconv=1.d-10, ndiis_attempts=700, qmcharge=0, # / # ... (atom coordinates in something like XYZ format) ... # To get WBOs, we need to add "printbondorders=1" to the list of keywords # First, split the sqm.in text at the "/" mark at the end of the header datasp = data.split("/") # Insert the "printbondorders" directive in a new line and re-add the "/" datasp.insert(1, "printbondorders=1, \n /") # Reassemble the file text new_data = "".join(datasp) # Write the new file contents, overwriting the original file. with open(file_path, "w") as of: of.write(new_data) def _get_fractional_bond_orders_from_sqm_out( self, file_path, validate_elements=None ): """ Process a SQM output file containing bond orders, and return a dict of the form dict[atom_1_index, atom_2_index] = fractional_bond_order Parameters ---------- file_path : str File path for sqm output file validate_elements : iterable of str The element symbols expected in molecule index order. A ValueError will be raised if the elements are not found in this order. Returns ------- bond_orders : dict[(int, int)]: float A dictionary where the keys are tuples of two atom indices and the values are floating-point bond orders. The keys are sorted in ascending order, such that the lower atom index is key[0] and the higher is key[1]. 
""" # Example sqm.out section with WBOs: # Bond Orders # # QMMM: NUM1 ELEM1 NUM2 ELEM2 BOND_ORDER # QMMM: 2 C 1 C 1.41107532 # QMMM: 3 C 1 C 1.41047804 # ... # QMMM: 15 H 13 H 0.00000954 # QMMM: 15 H 14 H 0.00000813 # # --------- Calculation Completed ---------- data = open(file_path).read() begin_sep = """ Bond Orders QMMM: NUM1 ELEM1 NUM2 ELEM2 BOND_ORDER """ end_sep = """ --------- Calculation Completed ---------- """ # Extract the chunk of text between begin_sep and end_sep, and split it by newline fbo_lines = data.split(begin_sep)[1].split(end_sep)[0].split("\n") # Iterate over the lines and populate the dict to return bond_orders = dict() for line in fbo_lines: linesp = line.split() atom_index_1 = int(linesp[1]) atom_element_1 = linesp[2] atom_index_2 = int(linesp[3]) atom_element_2 = linesp[4] bond_order = float(linesp[5]) # If validate_elements was provided, ensure that the ordering of element symbols is what we expected if validate_elements is not None: if (atom_element_1 != validate_elements[atom_index_1 - 1]) or ( atom_element_2 != validate_elements[atom_index_2 - 1] ): # raise ValueError('\n'.join(fbo_lines)) raise ValueError( f"Elements or indexing in sqm output differ from expectation. " f"Expected {validate_elements[atom_index_1]} with index {atom_index_1} and " f"{validate_elements[atom_index_2]} with index {atom_index_2}, " f"but SQM output has {atom_element_1} and {atom_element_2} for the same atoms." ) # To make lookup easier, we identify bonds as integer tuples with the lowest atom index # first and the highest second. index_tuple = tuple(sorted([atom_index_1, atom_index_2])) bond_orders[index_tuple] = bond_order return bond_orders def assign_fractional_bond_orders( self, molecule, bond_order_model=None, use_conformers=None, _cls=None ): """ Update and store list of bond orders this molecule. Bond orders are stored on each bond, in the `bond.fractional_bond_order` attribute. .. warning :: This API is experimental and subject to change. 
def assign_fractional_bond_orders(
    self, molecule, bond_order_model=None, use_conformers=None, _cls=None
):
    """
    Update and store list of bond orders this molecule. Bond orders are stored
    on each bond, in the `bond.fractional_bond_order` attribute.

    .. warning :: This API is experimental and subject to change.

    Parameters
    ----------
    molecule : openff.toolkit.topology.molecule Molecule
        The molecule to assign wiberg bond orders to
    bond_order_model : str, optional, default=None
        The charge model to use. Only allowed value is 'am1-wiberg'. If None,
        'am1-wiberg' will be used.
    use_conformers : iterable of simtk.unit.Quantity(np.array) with shape
        (n_atoms, 3) and dimension of distance, optional, default=None
        The conformers to use for fractional bond order calculation. If None,
        an appropriate number of conformers will be generated by an available
        ToolkitWrapper.
    _cls : class
        Molecule constructor
    """
    from openff.toolkit.topology import Molecule

    # Find the path to antechamber
    # TODO: How should we implement find_executable?
    ANTECHAMBER_PATH = find_executable("antechamber")
    if ANTECHAMBER_PATH is None:
        raise AntechamberNotFoundError(
            "Antechamber not found, cannot run "
            "AmberToolsToolkitWrapper.assign_fractional_bond_orders()"
        )

    if _cls is None:
        from openff.toolkit.topology.molecule import Molecule

        _cls = Molecule

    # Make a copy since we'll be messing with this molecule's conformers
    temp_mol = _cls(molecule)
    if use_conformers is None:
        temp_mol.generate_conformers(
            n_conformers=1,
            toolkit_registry=self._rdkit_toolkit_wrapper,
        )
    else:
        temp_mol._conformers = None
        for conformer in use_conformers:
            temp_mol._add_conformer(conformer)

    # NOTE(review): if use_conformers is an empty iterable, temp_mol._conformers
    # stays None here, so len(temp_mol.conformers) would raise TypeError instead
    # of the ValueError below — confirm whether that path is intended.
    if len(temp_mol.conformers) == 0:
        raise ValueError(
            "No conformers present in molecule submitted for fractional bond order calculation. Consider "
            "loading the molecule from a file with geometry already present or running "
            "molecule.generate_conformers() before calling molecule.assign_fractional_bond_orders"
        )

    # Compute bond orders
    bond_order_model_to_antechamber_keyword = {"am1-wiberg": "mul"}
    supported_bond_order_models = list(
        bond_order_model_to_antechamber_keyword.keys()
    )
    if bond_order_model is None:
        bond_order_model = "am1-wiberg"

    # Normalize for the case-insensitive comparison below
    bond_order_model = bond_order_model.lower()

    if bond_order_model not in supported_bond_order_models:
        raise ValueError(
            f"Bond order model '{bond_order_model}' is not supported by AmberToolsToolkitWrapper. "
            f"Supported models are {supported_bond_order_models}"
        )
    ac_charge_keyword = bond_order_model_to_antechamber_keyword[bond_order_model]

    # Collect per-conformer bond orders keyed by (low_index, high_index) tuples;
    # they are averaged across conformers at the end.
    bond_orders = defaultdict(list)

    for conformer in [*temp_mol.conformers]:
        with tempfile.TemporaryDirectory() as tmpdir:
            with temporary_cd(tmpdir):
                net_charge = temp_mol.total_charge
                # Write out molecule in SDF format (one conformer at a time)
                temp_mol._conformers = [conformer]
                self._rdkit_toolkit_wrapper.to_file(
                    temp_mol, "molecule.sdf", file_format="sdf"
                )
                # Prepare sqm.in file as if we were going to run charge calc
                # TODO: Add error handling if antechamber chokes
                subprocess.check_output(
                    [
                        "antechamber",
                        "-i",
                        "molecule.sdf",
                        "-fi",
                        "sdf",
                        "-o",
                        "sqm.in",
                        "-fo",
                        "sqmcrt",
                        "-pf",
                        "yes",
                        "-c",
                        ac_charge_keyword,
                        "-nc",
                        str(net_charge),
                    ]
                )
                # Modify sqm.in to request bond order calculation
                self._modify_sqm_in_to_request_bond_orders("sqm.in")
                # Run sqm to get bond orders
                subprocess.check_output(
                    ["sqm", "-i", "sqm.in", "-o", "sqm.out", "-O"]
                )

                # Ensure that antechamber/sqm did not change the indexing by checking against
                # an ordered list of element symbols for this molecule
                expected_elements = [at.element.symbol for at in molecule.atoms]
                conformer_bond_orders = (
                    self._get_fractional_bond_orders_from_sqm_out(
                        "sqm.out", validate_elements=expected_elements
                    )
                )

                for bond_indices, value in conformer_bond_orders.items():
                    bond_orders[bond_indices].append(value)

    # Note that sqm calculate WBOs for ALL PAIRS of atoms, not just those that have
    # bonds defined in the original molecule. So here we iterate over the bonds in
    # the original molecule and only nab the WBOs for those.
    for bond in molecule.bonds:
        # The atom index tuples that act as bond indices are ordered from lowest to highest by
        # _get_fractional_bond_orders_from_sqm_out, so here we make sure that we look them up in
        # sorted order as well
        sorted_atom_indices = sorted(
            tuple([bond.atom1_index + 1, bond.atom2_index + 1])
        )
        bond.fractional_bond_order = np.mean(
            bond_orders[tuple(sorted_atom_indices)]
        )
class ToolkitRegistry:
    """
    Registry for ToolkitWrapper objects

    Examples
    --------

    Register toolkits in a specified order, skipping if unavailable

    >>> from openff.toolkit.utils.toolkits import ToolkitRegistry
    >>> toolkit_precedence = [OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper]
    >>> toolkit_registry = ToolkitRegistry(toolkit_precedence)
    >>> toolkit_registry
    ToolkitRegistry containing OpenEye Toolkit, The RDKit, AmberTools

    Register all available toolkits (in the order OpenEye, RDKit, AmberTools, built-in)

    >>> toolkits = [OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, BuiltInToolkitWrapper]
    >>> toolkit_registry = ToolkitRegistry(toolkit_precedence=toolkits)
    >>> toolkit_registry
    ToolkitRegistry containing OpenEye Toolkit, The RDKit, AmberTools, Built-in Toolkit

    Retrieve the global singleton toolkit registry, which is created when this module is
    imported from all available toolkits:

    >>> from openff.toolkit.utils.toolkits import GLOBAL_TOOLKIT_REGISTRY as toolkit_registry
    >>> toolkit_registry
    ToolkitRegistry containing OpenEye Toolkit, The RDKit, AmberTools, Built-in Toolkit

    Note that this will contain different ToolkitWrapper objects based on what toolkits
    are currently installed.

    .. warning :: This API is experimental and subject to change.
    """

    def __init__(
        self,
        toolkit_precedence=(),
        exception_if_unavailable=True,
        _register_imported_toolkit_wrappers=False,
    ):
        """
        Create an empty toolkit registry.

        Parameters
        ----------
        toolkit_precedence : iterable of ToolkitWrapper subclasses, default=()
            Toolkit wrapper classes, in order of desired precedence when performing molecule
            operations. The empty default registers no toolkits.  (The previous default was a
            mutable ``[]``; an empty tuple behaves identically in every branch below while
            avoiding the shared-mutable-default pitfall.)  If None, no toolkits will be
            registered unless _register_imported_toolkit_wrappers is True.
        exception_if_unavailable : bool, optional, default=True
            If True, an exception will be raised if the toolkit is unavailable
        _register_imported_toolkit_wrappers : bool, optional, default=False
            If True, will attempt to register all imported ToolkitWrapper subclasses that can
            be found in the order of toolkit_precedence, if specified. If toolkit_precedence
            is not specified, the default order is [OpenEyeToolkitWrapper, RDKitToolkitWrapper,
            AmberToolsToolkitWrapper, BuiltInToolkitWrapper].
        """
        self._toolkits = list()

        toolkits_to_register = list()

        if _register_imported_toolkit_wrappers:
            # None means "use the canonical default precedence order".
            if toolkit_precedence is None:
                toolkit_precedence = [
                    OpenEyeToolkitWrapper,
                    RDKitToolkitWrapper,
                    AmberToolsToolkitWrapper,
                    BuiltInToolkitWrapper,
                ]
            # Only register wrapper classes that are actually importable/known.
            all_importable_toolkit_wrappers = all_subclasses(ToolkitWrapper)
            for toolkit in toolkit_precedence:
                if toolkit in all_importable_toolkit_wrappers:
                    toolkits_to_register.append(toolkit)
        else:
            if toolkit_precedence:
                toolkits_to_register = toolkit_precedence

        if toolkits_to_register:
            for toolkit in toolkits_to_register:
                self.register_toolkit(
                    toolkit, exception_if_unavailable=exception_if_unavailable
                )

    @property
    def registered_toolkits(self):
        """
        List registered toolkits.

        .. warning :: This API is experimental and subject to change.

        .. todo :: Should this return a generator? Deep copies? Classes? Toolkit names?

        Returns
        -------
        toolkits : iterable of toolkit objects
            A shallow copy of the internal list, so callers cannot mutate the registry.
        """
        return list(self._toolkits)

    @property
    def registered_toolkit_versions(self):
        """
        Return a dict containing the version of each registered toolkit.

        .. warning :: This API is experimental and subject to change.

        Returns
        -------
        toolkit_versions : dict[str, str]
            A dictionary mapping names and versions of wrapped toolkits
        """
        return {
            tk.toolkit_name: tk.toolkit_version for tk in self.registered_toolkits
        }

    def register_toolkit(self, toolkit_wrapper, exception_if_unavailable=True):
        """
        Register the provided toolkit wrapper class, instantiating an object of it.

        .. warning :: This API is experimental and subject to change.

        .. todo ::

           This method should raise an exception if the toolkit is unavailable, unless an
           optional argument is specified that silently avoids registration of toolkits that
           are unavailable.

        Parameters
        ----------
        toolkit_wrapper : instance or subclass of ToolkitWrapper
            The toolkit wrapper to register or its class.
        exception_if_unavailable : bool, optional, default=True
            If True, an exception will be raised if the toolkit is unavailable
        """
        # Instantiate class if class, or just add if already instantiated.
        if isinstance(toolkit_wrapper, type):
            try:
                toolkit_wrapper = toolkit_wrapper()
            except ToolkitUnavailableException:
                msg = "Unable to load toolkit '{}'. ".format(
                    toolkit_wrapper._toolkit_name
                )
                if exception_if_unavailable:
                    raise ToolkitUnavailableException(msg)
                else:
                    # Special-case a friendlier hint for the commercial OpenEye toolkits.
                    if "OpenEye" in msg:
                        msg += (
                            "The Open Force Field Toolkit does not require the OpenEye Toolkits, and can "
                            "use RDKit/AmberTools instead. However, if you have a valid license for the "
                            "OpenEye Toolkits, consider installing them for faster performance and additional "
                            "file format support: "
                            "https://docs.eyesopen.com/toolkits/python/quickstart-python/linuxosx.html "
                            "OpenEye offers free Toolkit licenses for academics: "
                            "https://www.eyesopen.com/academic-licensing"
                        )
                    logger.warning(f"Warning: {msg}")
                return

        # Add toolkit to the registry.
        self._toolkits.append(toolkit_wrapper)

    def deregister_toolkit(self, toolkit_wrapper):
        """
        Remove a ToolkitWrapper from the list of toolkits in this ToolkitRegistry

        .. warning :: This API is experimental and subject to change.

        Parameters
        ----------
        toolkit_wrapper : instance or subclass of ToolkitWrapper
            The toolkit wrapper to remove from the registry

        Raises
        ------
        InvalidToolkitError
            If toolkit_wrapper is not a ToolkitWrapper or subclass
        ToolkitUnavailableException
            If toolkit_wrapper is not found in the registry
        """
        # If passed a class, instantiate it
        if inspect.isclass(toolkit_wrapper):
            toolkit_wrapper = toolkit_wrapper()

        if not isinstance(toolkit_wrapper, ToolkitWrapper):
            # (typo fix: the message previously read "must an ToolkitWrapper")
            msg = (
                f"Argument {toolkit_wrapper} must be a ToolkitWrapper "
                f"or subclass of it. Found type {type(toolkit_wrapper)}."
            )
            raise InvalidToolkitError(msg)

        toolkits_to_remove = []

        for toolkit in self._toolkits:
            # Exact type match is deliberate: deregistering a base wrapper class
            # must not silently remove registered subclasses.
            if type(toolkit) == type(toolkit_wrapper):
                toolkits_to_remove.append(toolkit)

        if not toolkits_to_remove:
            msg = (
                f"Did not find {toolkit_wrapper} in registry. "
                f"Currently registered toolkits are {self._toolkits}"
            )
            raise ToolkitUnavailableException(msg)

        for toolkit_to_remove in toolkits_to_remove:
            self._toolkits.remove(toolkit_to_remove)

    def add_toolkit(self, toolkit_wrapper):
        """
        Append a ToolkitWrapper onto the list of toolkits in this ToolkitRegistry

        .. warning :: This API is experimental and subject to change.

        Parameters
        ----------
        toolkit_wrapper : openff.toolkit.utils.ToolkitWrapper
            The ToolkitWrapper object to add to the list of registered toolkits

        Raises
        ------
        InvalidToolkitError
            If toolkit_wrapper is not a ToolkitWrapper or subclass
        """
        if not isinstance(toolkit_wrapper, ToolkitWrapper):
            msg = "Something other than a ToolkitWrapper object was passed to ToolkitRegistry.add_toolkit()\n"
            msg += "Given object {} of type {}".format(
                toolkit_wrapper, type(toolkit_wrapper)
            )
            raise InvalidToolkitError(msg)
        self._toolkits.append(toolkit_wrapper)

    # TODO: Can we automatically resolve calls to methods that are not explicitly defined using some Python magic?

    def resolve(self, method_name):
        """
        Resolve the requested method name by checking all registered toolkits in
        order of precedence for one that provides the requested method.

        Parameters
        ----------
        method_name : str
            The name of the method to resolve

        Returns
        -------
        method
            The method of the first registered toolkit that provides the requested method name

        Raises
        ------
        NotImplementedError
            if the requested method cannot be found among the registered toolkits

        Examples
        --------

        Create a molecule, and call the toolkit ``to_smiles()`` method directly

        >>> from openff.toolkit.topology import Molecule
        >>> molecule = Molecule.from_smiles('Cc1ccccc1')
        >>> toolkit_registry = ToolkitRegistry([OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper])
        >>> method = toolkit_registry.resolve('to_smiles')
        >>> smiles = method(molecule)

        .. todo :: Is there a better way to figure out which toolkits implement given methods by introspection?
        """
        for toolkit in self._toolkits:
            if hasattr(toolkit, method_name):
                method = getattr(toolkit, method_name)
                return method

        # No toolkit was found to provide the requested capability
        # TODO: Can we help developers by providing a check for typos in expected method names?
        msg = 'No registered toolkits can provide the capability "{}".\n'.format(
            method_name
        )
        msg += "Available toolkits are: {}\n".format(self.registered_toolkits)
        raise NotImplementedError(msg)

    # TODO: Can we instead register available methods directly with `ToolkitRegistry`,
    # so we can just use `ToolkitRegistry.method()`?
    def call(self, method_name, *args, raise_exception_types=None, **kwargs):
        """
        Execute the requested method by attempting to use all registered toolkits in order
        of precedence.

        ``*args`` and ``**kwargs`` are passed to the desired method, and return values of
        the method are returned.

        This is a convenient shorthand for
        ``toolkit_registry.resolve_method(method_name)(*args, **kwargs)``

        Parameters
        ----------
        method_name : str
            The name of the method to execute
        raise_exception_types : list of Exception subclasses, default=None
            A list of exception-derived types to catch and raise immediately. If None, this
            will be set to [Exception], which will raise an error immediately if the first
            ToolkitWrapper in the registry fails. To try each ToolkitWrapper that provides a
            suitably-named method, set this to the empty list ([]). If all ToolkitWrappers
            run without raising any exceptions in this list, a single ValueError will be
            raised containing the each ToolkitWrapper that was tried and the exception it
            raised.

        Raises
        ------
        NotImplementedError
            if the requested method cannot be found among the registered toolkits
        ValueError
            if no exceptions in the raise_exception_types list were raised by
            ToolkitWrappers, and all ToolkitWrappers in the ToolkitRegistry were tried.

        Other forms of exceptions are possible if raise_exception_types is specified.
        These are defined by the ToolkitWrapper method being called.

        Examples
        --------

        Create a molecule, and call the toolkit ``to_smiles()`` method directly

        >>> from openff.toolkit.topology import Molecule
        >>> molecule = Molecule.from_smiles('Cc1ccccc1')
        >>> toolkit_registry = ToolkitRegistry([OpenEyeToolkitWrapper, RDKitToolkitWrapper])
        >>> smiles = toolkit_registry.call('to_smiles', molecule)
        """
        if raise_exception_types is None:
            raise_exception_types = [Exception]

        errors = list()
        for toolkit in self._toolkits:
            if hasattr(toolkit, method_name):
                method = getattr(toolkit, method_name)
                try:
                    return method(*args, **kwargs)
                except Exception as e:
                    # Re-raise immediately if the exception type was requested;
                    # otherwise record it and fall through to the next toolkit.
                    for exception_type in raise_exception_types:
                        if isinstance(e, exception_type):
                            raise e
                    errors.append((toolkit, e))

        # No toolkit was found to provide the requested capability
        # TODO: Can we help developers by providing a check for typos in expected method names?
        msg = (
            f'No registered toolkits can provide the capability "{method_name}" '
            f'for args "{args}" and kwargs "{kwargs}"\n'
        )
        msg += "Available toolkits are: {}\n".format(self.registered_toolkits)
        # Append information about toolkits that implemented the method, but could not
        # handle the provided parameters
        for toolkit, error in errors:
            msg += " {} {} : {}\n".format(toolkit, type(error), error)
        raise ValueError(msg)

    def __repr__(self):
        # Plain concatenation; the original used an f-prefix with no placeholders.
        return "ToolkitRegistry containing " + ", ".join(
            tk.toolkit_name for tk in self._toolkits
        )
VARIABLES # ============================================================================================= OPENEYE_AVAILABLE = False RDKIT_AVAILABLE = False AMBERTOOLS_AVAILABLE = False # Only available toolkits will have made it into the GLOBAL_TOOLKIT_REGISTRY for toolkit in GLOBAL_TOOLKIT_REGISTRY.registered_toolkits: if type(toolkit) is OpenEyeToolkitWrapper: OPENEYE_AVAILABLE = True elif type(toolkit) is RDKitToolkitWrapper: RDKIT_AVAILABLE = True elif type(toolkit) is AmberToolsToolkitWrapper: AMBERTOOLS_AVAILABLE = True # ============================================================================================= # WARN IF INSUFFICIENT TOOLKITS INSTALLED # ============================================================================================= # Define basic toolkits that handle essential file I/O BASIC_CHEMINFORMATICS_TOOLKITS = [RDKitToolkitWrapper, OpenEyeToolkitWrapper] # Ensure we have at least one basic toolkit if ( sum( [ tk.is_available() for tk in GLOBAL_TOOLKIT_REGISTRY.registered_toolkits if type(tk) in BASIC_CHEMINFORMATICS_TOOLKITS ] ) == 0 ): msg = "WARNING: No basic cheminformatics toolkits are available.\n" msg += "At least one basic toolkit is required to handle SMARTS matching and file I/O. \n" msg += "Please install at least one of the following basic toolkits:\n" for wrapper in all_subclasses(ToolkitWrapper): if wrapper.toolkit_name is not None: msg += "{} : {}\n".format( wrapper._toolkit_name, wrapper._toolkit_installation_instructions ) print(msg)
#!/usr/bin/env python
"""
Wrapper classes for providing a minimal consistent interface to cheminformatics toolkits

Currently supported toolkits:

* The `OpenEye Toolkit <https://docs.eyesopen.com/toolkits/python/quickstart-python/index.html>`_
* The `RDKit <http://www.rdkit.org/>`_
* `AmberTools <http://ambermd.org/AmberTools.php>`_

.. todo::

   * Add checks at the beginning of each toolkit method call to make sure toolkit is licensed
   * Switch toolkit methods to object methods instead of static methods
   * Should this be under ``openff.toolkit.utils.toolkits`` or ``openff.toolkit.toolkits``?
   * Add singleton global toolkit registry that registers all available toolkits by default
     when this file is imported
   * Add description fields for each toolkit wrapper
   * Eliminate global variables in favor of a singleton pattern
   * Change global variables from _INSTALLED to _AVAILABLE

"""

# BUG FIX: "BuiltInToolkitWrapper" appeared twice in this list; __all__ is the
# module's declared public API and should not contain duplicates.
__all__ = [
    "DEFAULT_AROMATICITY_MODEL",
    "ALLOWED_AROMATICITY_MODELS",
    "DEFAULT_FRACTIONAL_BOND_ORDER_MODEL",
    "ALLOWED_FRACTIONAL_BOND_ORDER_MODELS",
    "DEFAULT_CHARGE_MODEL",
    "ALLOWED_CHARGE_MODELS",
    "LicenseError",
    "MissingPackageError",
    "ToolkitUnavailableException",
    "InvalidToolkitError",
    "InvalidToolkitRegistryError",
    "UndefinedStereochemistryError",
    "GAFFAtomTypeWarning",
    "ToolkitWrapper",
    "BuiltInToolkitWrapper",
    "OpenEyeToolkitWrapper",
    "RDKitToolkitWrapper",
    "AmberToolsToolkitWrapper",
    "ToolkitRegistry",
    "GLOBAL_TOOLKIT_REGISTRY",
    "OPENEYE_AVAILABLE",
    "RDKIT_AVAILABLE",
    "AMBERTOOLS_AVAILABLE",
    "BASIC_CHEMINFORMATICS_TOOLKITS",
]


# =============================================================================================
# GLOBAL IMPORTS
# =============================================================================================

import copy
import importlib
import inspect
import itertools
import logging
import re
import subprocess
import tempfile
from collections import defaultdict

# NOTE(review): distutils is deprecated (PEP 632, removed in Python 3.12);
# shutil.which is the modern replacement, but find_executable is kept here to
# avoid any subtle platform-dependent behavior change — confirm before migrating.
from distutils.spawn import find_executable
from functools import wraps
from typing import TYPE_CHECKING, List, Optional, Tuple

import numpy as np
from simtk import unit

from openff.toolkit.utils.utils import (
    MessageException,
    all_subclasses,
    inherit_docstrings,
    temporary_cd,
)

if TYPE_CHECKING:
    # NOTE(review): this imports from the legacy "openforcefield" namespace while
    # the rest of the file uses "openff.toolkit" — confirm which package is intended.
    from openforcefield.topology.molecule import Molecule


# =============================================================================================
# CONFIGURE LOGGER
# =============================================================================================

logger = logging.getLogger(__name__)


# =============================================================================================
# SUPPORTED MODELS
#
# TODO: We may no longer need these since we now require SMIRNOFF to specify these models explicitly.
# =============================================================================================

DEFAULT_AROMATICITY_MODEL = "OEAroModel_MDL"  # TODO: Is there a more specific name and reference for the aromaticity model?
ALLOWED_AROMATICITY_MODELS = ["OEAroModel_MDL"]

DEFAULT_FRACTIONAL_BOND_ORDER_MODEL = "Wiberg"  # TODO: Is there a more specific name and reference for the fractional bond order models?
ALLOWED_FRACTIONAL_BOND_ORDER_MODELS = ["Wiberg"]

DEFAULT_CHARGE_MODEL = "AM1-BCC"  # TODO: Should this be `AM1-BCC`, or should we encode BCCs explicitly via AM1-CM2 preprocessing?
ALLOWED_CHARGE_MODELS = ["AM1-BCC"]  # TODO: Which models do we want to support?


# =============================================================================================
# Exceptions
# =============================================================================================


class MissingPackageError(MessageException):
    """This function requires a package that is not installed."""


class ToolkitUnavailableException(MessageException):
    """The requested toolkit is unavailable."""

    # TODO: Allow toolkit to be specified and used in formatting/printing exception.
class LicenseError(ToolkitUnavailableException):
    """This function requires a license that cannot be found."""


class InvalidToolkitError(MessageException):
    """A non-toolkit object was received when a toolkit object was expected"""


class InvalidToolkitRegistryError(MessageException):
    """An object other than a ToolkitRegistry or toolkit wrapper was received"""


class UndefinedStereochemistryError(MessageException):
    """A molecule was attempted to be loaded with undefined stereochemistry"""


class GAFFAtomTypeWarning(RuntimeWarning):
    """A warning raised if a loaded mol2 file possibly uses GAFF atom types."""


class ChargeMethodUnavailableError(MessageException):
    """A toolkit does not support the requested partial_charge_method combination"""


class IncorrectNumConformersError(MessageException):
    """The requested partial_charge_method expects a different number of conformers than was provided"""


class IncorrectNumConformersWarning(Warning):
    """The requested partial_charge_method expects a different number of conformers than was provided"""


class ChargeCalculationError(MessageException):
    """An unhandled error occurred in an external toolkit during charge calculation"""


class InvalidIUPACNameError(MessageException):
    """Failed to parse IUPAC name"""


class AntechamberNotFoundError(MessageException):
    """The antechamber executable was not found"""


# =============================================================================================
# TOOLKIT UTILITY DECORATORS
# =============================================================================================

# =============================================================================================
# UTILITY FUNCTIONS
# =============================================================================================

# =============================================================================================
# CHEMINFORMATICS TOOLKIT WRAPPERS
# =============================================================================================


class ToolkitWrapper:
    """
    Toolkit wrapper base class.

    .. warning :: This API is experimental and subject to change.
    """

    _is_available = None  # True if toolkit is available
    _toolkit_version = None
    _toolkit_name = None  # Name of the toolkit
    _toolkit_installation_instructions = (
        None  # Installation instructions for the toolkit
    )

    # @staticmethod
    # TODO: Right now, to access the class definition, I have to make this a classmethod
    # and thereby call it with () on the outermost decorator. Is this wasting time? Are we caching
    # the is_available results?
    @classmethod
    def requires_toolkit(cls):  # remember cls is a ToolkitWrapper subclass here
        # Decorator factory: wraps ``func`` so that calling it first checks
        # ``cls.is_available()`` and raises ToolkitUnavailableException when the
        # wrapped toolkit cannot be used. Apply as ``@SubClass.requires_toolkit()``.
        def decorator(func):
            @wraps(func)
            def wrapped_function(*args, **kwargs):
                if not cls.is_available():
                    msg = "This function requires the {} toolkit".format(
                        cls._toolkit_name
                    )
                    raise ToolkitUnavailableException(msg)
                value = func(*args, **kwargs)
                return value

            return wrapped_function

        return decorator

    @property
    # @classmethod
    def toolkit_name(self):
        """
        Return the name of the toolkit wrapped by this class as a str

        .. warning :: This API is experimental and subject to change.

        Returns
        -------
        toolkit_name : str
            The name of the wrapped toolkit

        """
        # Read from the class attribute rather than the instance so subclasses
        # only need to set ``_toolkit_name`` once at class level.
        return self.__class__._toolkit_name

    @property
    # @classmethod
    def toolkit_installation_instructions(self):
        """
        Instructions on how to install the wrapped toolkit.
        """
        return self._toolkit_installation_instructions

    # @classmethod
    @property
    def toolkit_file_read_formats(self):
        """
        List of file formats that this toolkit can read.
        """
        # NOTE(review): ``_toolkit_file_read_formats`` is populated by subclass
        # ``__init__`` methods; accessing this property on a bare base-class
        # instance would raise AttributeError.
        return self._toolkit_file_read_formats

    # @classmethod
    @property
    def toolkit_file_write_formats(self):
        """
        List of file formats that this toolkit can write.
""" return self._toolkit_file_write_formats @classmethod def is_available(cls): """ Check whether the corresponding toolkit can be imported Returns ------- is_installed : bool True if corresponding toolkit is installed, False otherwise. """ return NotImplementedError @property def toolkit_version(self): """ Return the version of the wrapped toolkit as a str .. warning :: This API is experimental and subject to change. Returns ------- toolkit_version : str The version of the wrapped toolkit """ return self._toolkit_version def from_file(self, file_path, file_format, allow_undefined_stereo=False): """ Return an openff.toolkit.topology.Molecule from a file using this toolkit. Parameters ---------- file_path : str The file to read the molecule from file_format : str Format specifier, usually file suffix (eg. 'MOL2', 'SMI') Note that not all toolkits support all formats. Check ToolkitWrapper.toolkit_file_read_formats for details. allow_undefined_stereo : bool, default=False If false, raises an exception if any molecules contain undefined stereochemistry. _cls : class Molecule constructor Returns ------- molecules : Molecule or list of Molecules a list of Molecule objects is returned. """ return NotImplementedError def from_file_obj( self, file_obj, file_format, allow_undefined_stereo=False, _cls=None ): """ Return an openff.toolkit.topology.Molecule from a file-like object (an object with a ".read()" method using this toolkit. Parameters ---------- file_obj : file-like object The file-like object to read the molecule from file_format : str Format specifier, usually file suffix (eg. 'MOL2', 'SMI') Note that not all toolkits support all formats. Check ToolkitWrapper.toolkit_file_read_formats for details. allow_undefined_stereo : bool, default=False If false, raises an exception if any molecules contain undefined stereochemistry. If false, the function skips loading the molecule. 
_cls : class Molecule constructor Returns ------- molecules : Molecule or list of Molecules a list of Molecule objects is returned. """ return NotImplementedError def _check_n_conformers( self, molecule, partial_charge_method, min_confs=None, max_confs=None, strict_n_conformers=False, ): """ Private method for validating the number of conformers on a molecule prior to partial charge calculation Parameters ---------- molecule : Molecule Molecule for which partial charges are to be computed partial_charge_method : str, optional, default=None The name of the charge method being used min_confs : int, optional, default=None The minimum number of conformers required to use this charge method max_confs : int, optional, default=None The maximum number of conformers required to use this charge method strict_n_conformers : bool, default=False Whether to raise an exception if an invalid number of conformers is provided. If this is False and an invalid number of conformers is found, a warning will be raised. Raises ------ IncorrectNumConformersError If the wrong number of conformers is attached to the input molecule, and strict_n_conformers is True. """ import warnings n_confs = molecule.n_conformers wrong_confs_msg = ( f"Molecule '{molecule}' has {n_confs} conformers, " f"but charge method '{partial_charge_method}' expects" ) exception_suffix = ( "You can disable this error by setting `strict_n_conformers=False' " "when calling 'molecule.assign_partial_charges'." ) # If there's no n_confs filter, then this molecule automatically passes if min_confs is None and max_confs is None: return # If there's constraints on both ends, check both limits elif min_confs is not None and max_confs is not None: if not (min_confs <= n_confs <= max_confs): if min_confs == max_confs: wrong_confs_msg += f" exactly {min_confs}." else: wrong_confs_msg += f" between {min_confs} and {max_confs}." 
            else:
                return
        # If there's only a min constraint (max_confs is unbounded), check that
        elif min_confs is not None and max_confs is None:
            if not (min_confs <= n_confs):
                wrong_confs_msg += f" at least {min_confs}."
            else:
                return
        # If there's only a maximum constraint, check that
        elif min_confs is None and max_confs is not None:
            if not (n_confs <= max_confs):
                wrong_confs_msg += f" at most {max_confs}."
            else:
                return
        # If we've made it this far, the molecule has the wrong number of conformers
        if strict_n_conformers:
            wrong_confs_msg += exception_suffix
            raise IncorrectNumConformersError(wrong_confs_msg)
        else:
            warnings.warn(wrong_confs_msg, IncorrectNumConformersWarning)

    def __repr__(self):
        return (
            f"ToolkitWrapper around {self.toolkit_name} version {self.toolkit_version}"
        )


@inherit_docstrings
class BuiltInToolkitWrapper(ToolkitWrapper):
    """
    Built-in ToolkitWrapper for very basic functionality. This is intended for use in testing and not much more.

    .. warning :: This API is experimental and subject to change.
    """

    _toolkit_name = "Built-in Toolkit"
    _toolkit_installation_instructions = (
        "This toolkit is installed with the Open Force Field Toolkit and does "
        "not require additional dependencies."
    )

    def __init__(self):
        super().__init__()

        # The built-in toolkit supports no file I/O; these lists back the
        # toolkit_file_read_formats / toolkit_file_write_formats properties.
        self._toolkit_file_read_formats = []
        self._toolkit_file_write_formats = []

    def assign_partial_charges(
        self,
        molecule,
        partial_charge_method=None,
        use_conformers=None,
        strict_n_conformers=False,
        _cls=None,
    ):
        """
        Compute partial charges with the built-in toolkit using simple arithmetic operations, and assign
        the new values to the partial_charges attribute.

        .. warning :: This API is experimental and subject to change.

        Parameters
        ----------
        molecule : openff.toolkit.topology.Molecule
            Molecule for which partial charges are to be computed
        partial_charge_method: str, optional, default=None
            The charge model to use. One of ['zeros', 'formal_charge']. If None, 'formal_charge'
            will be used.
use_conformers : iterable of simtk.unit.Quantity-wrapped numpy arrays, each with shape (n_atoms, 3) and dimension of distance. Optional, default = None Coordinates to use for partial charge calculation. If None, an appropriate number of conformers will be generated. strict_n_conformers : bool, default=False Whether to raise an exception if an invalid number of conformers is provided for the given charge method. If this is False and an invalid number of conformers is found, a warning will be raised instead of an Exception. _cls : class Molecule constructor Raises ------ ChargeMethodUnavailableError if the requested charge method can not be handled by this toolkit IncorrectNumConformersError if strict_n_conformers is True and use_conformers is provided and specifies an invalid number of conformers for the requested method ChargeCalculationError if the charge calculation is supported by this toolkit, but fails """ PARTIAL_CHARGE_METHODS = { "zeros": {"rec_confs": 0, "min_confs": 0, "max_confs": 0}, "formal_charge": {"rec_confs": 0, "min_confs": 0, "max_confs": 0}, } if partial_charge_method is None: partial_charge_method = "formal_charge" if _cls is None: from openff.toolkit.topology.molecule import Molecule _cls = Molecule # Make a temporary copy of the molecule, since we'll be messing with its conformers mol_copy = _cls(molecule) partial_charge_method = partial_charge_method.lower() if partial_charge_method not in PARTIAL_CHARGE_METHODS: raise ChargeMethodUnavailableError( f'Partial charge method "{partial_charge_method}"" is not supported by ' f"the Built-in toolkit. 
Available charge methods are " f"{list(PARTIAL_CHARGE_METHODS.keys())}" ) if use_conformers is None: # Note that this refers back to the GLOBAL_TOOLKIT_REGISTRY by default, since # BuiltInToolkitWrapper can't generate conformers mol_copy.generate_conformers( n_conformers=PARTIAL_CHARGE_METHODS[partial_charge_method]["rec_confs"] ) else: mol_copy._conformers = None for conformer in use_conformers: mol_copy._add_conformer(conformer) self._check_n_conformers( mol_copy, partial_charge_method=partial_charge_method, min_confs=0, max_confs=0, strict_n_conformers=strict_n_conformers, ) partial_charges = unit.Quantity( np.zeros((molecule.n_particles)), unit.elementary_charge ) if partial_charge_method == "zeroes": pass elif partial_charge_method == "formal_charge": for part_idx, particle in enumerate(molecule.particles): partial_charges[part_idx] = particle.formal_charge molecule.partial_charges = partial_charges @inherit_docstrings class OpenEyeToolkitWrapper(ToolkitWrapper): """ OpenEye toolkit wrapper .. warning :: This API is experimental and subject to change. 
""" _toolkit_name = "OpenEye Toolkit" _toolkit_installation_instructions = ( "The OpenEye toolkit requires a (free for academics) license, and can be " "found at: " "https://docs.eyesopen.com/toolkits/python/quickstart-python/install.html" ) # This could belong to ToolkitWrapper, although it seems strange # to carry that data for open-source toolkits _is_licensed = None # Only for OpenEye is there potentially a difference between # being available and installed _is_installed = None _license_functions = { "oechem": "OEChemIsLicensed", "oequacpac": "OEQuacPacIsLicensed", "oeiupac": "OEIUPACIsLicensed", "oeomega": "OEOmegaIsLicensed", } def __init__(self): self._toolkit_file_read_formats = [ "CAN", "CDX", "CSV", "FASTA", "INCHI", "INCHIKEY", "ISM", "MDL", "MF", "MMOD", "MOL2", "MOL2H", "MOPAC", "OEB", "PDB", "RDF", "SDF", "SKC", "SLN", "SMI", "USM", "XYC", ] self._toolkit_file_write_formats = [ "CAN", "CDX", "CSV", "FASTA", "INCHI", "INCHIKEY", "ISM", "MDL", "MF", "MMOD", "MOL2", "MOL2H", "MOPAC", "OEB", "PDB", "RDF", "SDF", "SKC", "SLN", "SMI", "USM", "XYC", ] # check if the toolkit can be loaded if not self.is_available(): msg = ( f"The required toolkit {self._toolkit_name} is not " f"available. {self._toolkit_installation_instructions}" ) if self._is_installed is False: raise ToolkitUnavailableException(msg) if self._is_licensed is False: raise LicenseError(msg) from openeye import __version__ as openeye_version self._toolkit_version = openeye_version @classmethod def _check_licenses(cls): """Check license of all known OpenEye tools. Returns True if any are found to be licensed, False if any are not.""" for (tool, license_func) in cls._license_functions.items(): try: module = importlib.import_module("openeye." + tool) except (ImportError, ModuleNotFoundError): continue else: if getattr(module, license_func)(): return True return False @classmethod def is_available(cls): """ Check if the given OpenEye toolkit components are available. 
If the OpenEye toolkit is not installed or no license is found for at least one the required toolkits , ``False`` is returned. Returns ------- all_installed : bool ``True`` if all required OpenEye tools are installed and licensed, ``False`` otherwise """ if cls._is_available is None: if cls._is_licensed is None: cls._is_licensed = cls._check_licenses() if cls._is_installed is None: for tool in cls._license_functions.keys(): cls._is_installed = True try: importlib.import_module("openeye." + tool) except (ImportError, ModuleNotFoundError): cls._is_installed = False cls._is_available = cls._is_installed and cls._is_licensed return cls._is_available def from_object(self, obj, allow_undefined_stereo=False, _cls=None): """ If given an OEMol (or OEMol-derived object), this function will load it into an openff.toolkit.topology.molecule Parameters ---------- obj : A molecule-like object An object to by type-checked. allow_undefined_stereo : bool, default=False Whether to accept molecules with undefined stereocenters. If False, an exception will be raised if a molecule with undefined stereochemistry is passed into this function. _cls : class Molecule constructor Returns ------- Molecule An openff.toolkit.topology.molecule Molecule. Raises ------ NotImplementedError If the object could not be converted into a Molecule. """ # TODO: Add tests for the from_object functions from openeye import oechem if _cls is None: from openff.toolkit.topology.molecule import Molecule _cls = Molecule if isinstance(obj, oechem.OEMolBase): return self.from_openeye( oemol=obj, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls ) raise NotImplementedError( "Cannot create Molecule from {} object".format(type(obj)) ) def from_file( self, file_path, file_format, allow_undefined_stereo=False, _cls=None ): """ Return an openff.toolkit.topology.Molecule from a file using this toolkit. 
Parameters ---------- file_path : str The file to read the molecule from file_format : str Format specifier, usually file suffix (eg. 'MOL2', 'SMI') Note that not all toolkits support all formats. Check ToolkitWrapper.toolkit_file_read_formats for details. allow_undefined_stereo : bool, default=False If false, raises an exception if oemol contains undefined stereochemistry. _cls : class Molecule constructor Returns ------- molecules : List[Molecule] The list of ``Molecule`` objects in the file. Raises ------ GAFFAtomTypeWarning If the loaded mol2 file possibly uses GAFF atom types, which are not supported. Examples -------- Load a mol2 file into an OpenFF ``Molecule`` object. >>> from openff.toolkit.utils import get_data_file_path >>> mol2_file_path = get_data_file_path('molecules/cyclohexane.mol2') >>> toolkit = OpenEyeToolkitWrapper() >>> molecule = toolkit.from_file(mol2_file_path, file_format='mol2') """ from openeye import oechem ifs = oechem.oemolistream(file_path) return self._read_oemolistream_molecules( ifs, allow_undefined_stereo, file_path=file_path, _cls=_cls ) def from_file_obj( self, file_obj, file_format, allow_undefined_stereo=False, _cls=None ): """ Return an openff.toolkit.topology.Molecule from a file-like object (an object with a ".read()" method using this toolkit. Parameters ---------- file_obj : file-like object The file-like object to read the molecule from file_format : str Format specifier, usually file suffix (eg. 'MOL2', 'SMI') Note that not all toolkits support all formats. Check ToolkitWrapper.toolkit_file_read_formats for details. allow_undefined_stereo : bool, default=False If false, raises an exception if oemol contains undefined stereochemistry. _cls : class Molecule constructor Returns ------- molecules : List[Molecule] The list of Molecule objects in the file object. Raises ------ GAFFAtomTypeWarning If the loaded mol2 file possibly uses GAFF atom types, which are not supported. 
""" from openeye import oechem # Configure input molecule stream. ifs = oechem.oemolistream() ifs.openstring(file_obj.read()) oeformat = getattr(oechem, "OEFormat_" + file_format) ifs.SetFormat(oeformat) return self._read_oemolistream_molecules(ifs, allow_undefined_stereo, _cls=_cls) def to_file_obj(self, molecule, file_obj, file_format): """ Writes an OpenFF Molecule to a file-like object Parameters ---------- molecule : an OpenFF Molecule The molecule to write file_obj The file-like object to write to file_format The format for writing the molecule data """ with tempfile.TemporaryDirectory() as tmpdir: with temporary_cd(tmpdir): outfile = "temp_molecule." + file_format self.to_file(molecule, outfile, file_format) file_data = open(outfile).read() file_obj.write(file_data) def to_file(self, molecule, file_path, file_format): """ Writes an OpenFF Molecule to a file-like object Parameters ---------- molecule : an OpenFF Molecule The molecule to write file_path The file path to write to. file_format The format for writing the molecule data """ from openeye import oechem oemol = self.to_openeye(molecule) ofs = oechem.oemolostream(file_path) openeye_format = getattr(oechem, "OEFormat_" + file_format.upper()) ofs.SetFormat(openeye_format) # OFFTK strictly treats SDF as a single-conformer format. # We need to override OETK's behavior here if the user is saving a multiconformer molecule. 
# Remove all but the first conformer when writing to SDF as we only support single conformer format if (file_format.lower() == "sdf") and oemol.NumConfs() > 1: conf1 = [conf for conf in oemol.GetConfs()][0] flat_coords = list() for idx, coord in conf1.GetCoords().items(): flat_coords.extend(coord) oemol.DeleteConfs() oecoords = oechem.OEFloatArray(flat_coords) oemol.NewConf(oecoords) # We're standardizing on putting partial charges into SDFs under the `atom.dprop.PartialCharge` property if (file_format.lower() == "sdf") and (molecule.partial_charges is not None): partial_charges_list = [ oeatom.GetPartialCharge() for oeatom in oemol.GetAtoms() ] partial_charges_str = " ".join([f"{val:f}" for val in partial_charges_list]) # TODO: "dprop" means "double precision" -- Is there any way to make Python more accurately # describe/infer the proper data type? oechem.OESetSDData(oemol, "atom.dprop.PartialCharge", partial_charges_str) # If the file format is "pdb" using OEWriteMolecule() rearranges the atoms (hydrogens are pushed to the bottom) # Issue #475 (https://github.com/openforcefield/openff-toolkit/issues/475) # dfhahn's workaround: Using OEWritePDBFile does not alter the atom arrangement if file_format.lower() == "pdb": if oemol.NumConfs() > 1: for conf in oemol.GetConfs(): oechem.OEWritePDBFile(ofs, conf, oechem.OEOFlavor_PDB_BONDS) else: oechem.OEWritePDBFile(ofs, oemol, oechem.OEOFlavor_PDB_BONDS) else: oechem.OEWriteMolecule(ofs, oemol) ofs.close() @staticmethod def _turn_oemolbase_sd_charges_into_partial_charges(oemol): """ Process an OEMolBase object and check to see whether it has an SD data pair where the tag is "atom.dprop.PartialCharge", indicating that it has a list of atomic partial charges. If so, apply those charges to the OEAtoms in the OEMolBase, and delete the SD data pair. Parameters ---------- oemol : openeye.oechem.OEMolBase The molecule to process Returns ------- charges_are_present : bool Whether charges are present in the SD file. 
This is necessary because OEAtoms have a default partial charge of 0.0, which makes truly zero-charge molecules (eg "N2", "Ar"...) indistinguishable from molecules for which partial charges have not been assigned. The OFF Toolkit allows this distinction with mol.partial_charges=None. In order to complete roundtrips within the OFFMol spec, we must interpret the presence or absence of this tag as a proxy for mol.partial_charges=None. """ from openeye import oechem for dp in oechem.OEGetSDDataPairs(oemol): if dp.GetTag() == "atom.dprop.PartialCharge": charges_str = oechem.OEGetSDData(oemol, "atom.dprop.PartialCharge") charges_unitless = [float(i) for i in charges_str.split()] assert len(charges_unitless) == oemol.NumAtoms() for charge, oeatom in zip(charges_unitless, oemol.GetAtoms()): oeatom.SetPartialCharge(charge) oechem.OEDeleteSDData(oemol, "atom.dprop.PartialCharge") return True return False def _read_oemolistream_molecules( self, oemolistream, allow_undefined_stereo, file_path=None, _cls=None ): """ Reads and return the Molecules in a OEMol input stream. Parameters ---------- oemolistream : oechem.oemolistream The OEMol input stream to read from. allow_undefined_stereo : bool If false, raises an exception if oemol contains undefined stereochemistry. file_path : str, optional The path to the mol2 file. This is used exclusively to make the error message more meaningful when the mol2 files doesn't use Tripos atom types. _cls : class Molecule constructor Returns ------- molecules : List[Molecule] The list of Molecule objects in the stream. 
""" from openeye import oechem mols = list() oemol = oechem.OEMol() while oechem.OEReadMolecule(oemolistream, oemol): oechem.OEPerceiveChiral(oemol) oechem.OEAssignAromaticFlags(oemol, oechem.OEAroModel_MDL) oechem.OE3DToInternalStereo(oemol) # If this is either a multi-conformer or multi-molecule SD file, check to see if there are partial charges if (oemolistream.GetFormat() == oechem.OEFormat_SDF) and hasattr( oemol, "GetConfs" ): # The openFF toolkit treats each conformer in a "multiconformer" SDF as # a separate molecule. # https://github.com/openforcefield/openff-toolkit/issues/202 # Note that there is ambiguity about how SD data and "multiconformer" SD files should be stored. # As a result, we have to do some weird stuff below, as discussed in # https://docs.eyesopen.com/toolkits/python/oechemtk/oemol.html#dude-where-s-my-sd-data # Jeff: I was unable to find a way to distinguish whether a SDF was multiconformer or not. # The logic below should handle either single- or multi-conformer SDFs. for conf in oemol.GetConfIter(): # First, we turn "conf" into an OEMCMol (OE multiconformer mol), since OTHER file formats # really are multiconformer, and we will eventually feed this into the `from_openeye` function, # which is made to ingest multiconformer mols. this_conf_oemcmol = conf.GetMCMol() # Then, we take any SD data pairs that were on the oemol, and copy them on to "this_conf_oemcmol". # These SD pairs will be populated if we're dealing with a single-conformer SDF. for dp in oechem.OEGetSDDataPairs(oemol): oechem.OESetSDData( this_conf_oemcmol, dp.GetTag(), dp.GetValue() ) # On the other hand, these SD pairs will be populated if we're dealing with a MULTI-conformer SDF. 
for dp in oechem.OEGetSDDataPairs(conf): oechem.OESetSDData( this_conf_oemcmol, dp.GetTag(), dp.GetValue() ) # This function fishes out the special SD data tag we use for partial charge # ("atom.dprop.PartialCharge"), and applies those as OETK-supported partial charges on the OEAtoms has_charges = self._turn_oemolbase_sd_charges_into_partial_charges( this_conf_oemcmol ) # Finally, we feed the molecule into `from_openeye`, where it converted into an OFFMol mol = self.from_openeye( this_conf_oemcmol, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls, ) # If the molecule didn't even have the `PartialCharges` tag, we set it from zeroes to None here. if not (has_charges): mol.partial_charges = None mols.append(mol) else: # In case this is being read from a SINGLE-molecule SD file, convert the SD field where we # stash partial charges into actual per-atom partial charges self._turn_oemolbase_sd_charges_into_partial_charges(oemol) mol = self.from_openeye( oemol, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls ) mols.append(mol) # Check if this is an AMBER-produced mol2 file, which we can not load because they use GAFF atom types. if oemolistream.GetFormat() == oechem.OEFormat_MOL2: self._check_mol2_gaff_atom_type(mol, file_path) return mols def enumerate_protomers(self, molecule, max_states=10): """ Enumerate the formal charges of a molecule to generate different protomoers. Parameters ---------- molecule: openff.toolkit.topology.Molecule The molecule whose state we should enumerate max_states: int optional, default=10, The maximum number of protomer states to be returned. Returns ------- molecules: List[openff.toolkit.topology.Molecule], A list of the protomers of the input molecules not including the input. 
""" from openeye import oequacpac options = oequacpac.OEFormalChargeOptions() # add one as the input is included options.SetMaxCount(max_states + 1) molecules = [] oemol = self.to_openeye(molecule=molecule) for protomer in oequacpac.OEEnumerateFormalCharges(oemol, options): mol = self.from_openeye( protomer, allow_undefined_stereo=True, _cls=molecule.__class__ ) if mol != molecule: molecules.append(mol) return molecules def enumerate_stereoisomers( self, molecule, undefined_only=False, max_isomers=20, rationalise=True ): """ Enumerate the stereocenters and bonds of the current molecule. Parameters ---------- molecule: openff.toolkit.topology.Molecule The molecule whose state we should enumerate undefined_only: bool optional, default=False If we should enumerate all stereocenters and bonds or only those with undefined stereochemistry max_isomers: int optional, default=20 The maximum amount of molecules that should be returned rationalise: bool optional, default=True If we should try to build and rationalise the molecule to ensure it can exist Returns -------- molecules: List[openff.toolkit.topology.Molecule] A list of openff.toolkit.topology.Molecule instances """ from openeye import oechem, oeomega oemol = self.to_openeye(molecule=molecule) # arguments for this function can be found here # <https://docs.eyesopen.com/toolkits/python/omegatk/OEConfGenFunctions/OEFlipper.html?highlight=stereoisomers> molecules = [] for isomer in oeomega.OEFlipper(oemol, 200, not undefined_only, True, False): if rationalise: # try and determine if the molecule is reasonable by generating a conformer with # strict stereo, like embedding in rdkit omega = oeomega.OEOmega() omega.SetMaxConfs(1) omega.SetCanonOrder(False) # Don't generate random stereoisomer if not specified omega.SetStrictStereo(True) mol = oechem.OEMol(isomer) status = omega(mol) if status: isomol = self.from_openeye(mol, _cls=molecule.__class__) if isomol != molecule: molecules.append(isomol) else: isomol = 
self.from_openeye(isomer, _cls=molecule.__class__) if isomol != molecule: molecules.append(isomol) return molecules[:max_isomers] def enumerate_tautomers(self, molecule, max_states=20): """ Enumerate the possible tautomers of the current molecule Parameters ---------- molecule: openff.toolkit.topology.Molecule The molecule whose state we should enumerate max_states: int optional, default=20 The maximum amount of molecules that should be returned Returns ------- molecules: List[openff.toolkit.topology.Molecule] A list of openff.toolkit.topology.Molecule instances excluding the input molecule. """ from openeye import oequacpac oemol = self.to_openeye(molecule=molecule) tautomers = [] # set the options tautomer_options = oequacpac.OETautomerOptions() tautomer_options.SetApplyWarts(False) tautomer_options.SetMaxTautomersGenerated(max_states + 1) tautomer_options.SetSaveStereo(True) # this aligns the outputs of rdkit and openeye for the example cases tautomer_options.SetCarbonHybridization(False) for tautomer in oequacpac.OEEnumerateTautomers(oemol, tautomer_options): # remove the input tautomer from the output taut = self.from_openeye( tautomer, allow_undefined_stereo=True, _cls=molecule.__class__ ) if taut != molecule: tautomers.append( self.from_openeye( tautomer, allow_undefined_stereo=True, _cls=molecule.__class__ ) ) return tautomers @staticmethod def _check_mol2_gaff_atom_type(molecule, file_path=None): """Attempts to detect the presence of GAFF atom types in a molecule loaded from a mol2 file. For now, this raises a ``GAFFAtomTypeWarning`` if the molecule include Osmium and Holmium atoms, which have GAFF types OS and HO respectively. Parameters ---------- molecule : openff.toolkit.topology.molecule.Molecule The loaded molecule. file_path : str, optional The path to the mol2 file. This is used exclusively to make the error message more meaningful. """ # Handle default. 
if file_path is None: file_path = "" else: # Append a ':' character that will separate the file # path from the molecule string representation. file_path = file_path + ":" # atomic_number: (GAFF_type, element_name) warning_atomic_numbers = {76: ("OS", "Osmium"), 67: ("HO", "Holmium")} for atom in molecule.atoms: try: atom_type, element_name = warning_atomic_numbers[atom.atomic_number] except KeyError: pass else: import warnings warn_msg = ( f'OpenEye interpreted the type "{atom_type}" in {file_path}{molecule.name}' f" as {element_name}. Does your mol2 file uses Tripos SYBYL atom types?" " Other atom types such as GAFF are not supported." ) warnings.warn(warn_msg, GAFFAtomTypeWarning) @staticmethod def _openeye_cip_atom_stereochemistry(oemol, oeatom): """ Determine CIP stereochemistry (R/S) for the specified atom Parameters ---------- oemol : openeye.oechem.OEMolBase The molecule of interest oeatom : openeye.oechem.OEAtomBase The atom whose stereochemistry is to be computed Returns ------- stereochemistry : str 'R', 'S', or None if no stereochemistry is specified or the atom is not a stereocenter """ from openeye import oechem if not oeatom.HasStereoSpecified(): # No stereochemical information has been stored, so this could be unknown stereochemistry # TODO: Should we raise an exception? return None cip = oechem.OEPerceiveCIPStereo(oemol, oeatom) if cip == oechem.OECIPAtomStereo_S: return "S" elif cip == oechem.OECIPAtomStereo_R: return "R" elif cip == oechem.OECIPAtomStereo_NotStereo: # Not a stereocenter # TODO: Should this be a different case from ``None``? 
return None @staticmethod def _openeye_cip_bond_stereochemistry(oemol, oebond): """ Determine CIP stereochemistry (E/Z) for the specified bond Parameters ---------- oemol : openeye.oechem.OEMolBase The molecule of interest oebond : openeye.oechem.OEBondBase The bond whose stereochemistry is to be computed Returns ------- stereochemistry : str 'E', 'Z', or None if stereochemistry is unspecified or the bond is not a stereo bond """ from openeye import oechem if not oebond.HasStereoSpecified(): # No stereochemical information has been stored, so this could be unknown stereochemistry # TODO: Should we raise an exception? return None cip = oechem.OEPerceiveCIPStereo(oemol, oebond) if cip == oechem.OECIPBondStereo_E: return "E" elif cip == oechem.OECIPBondStereo_Z: return "Z" elif cip == oechem.OECIPBondStereo_NotStereo: return None @staticmethod def from_openeye(oemol, allow_undefined_stereo=False, _cls=None): """ Create a Molecule from an OpenEye molecule. If the OpenEye molecule has implicit hydrogens, this function will make them explicit. ``OEAtom`` s have a different set of allowed value for partial charges than ``openff.toolkit.topology.Molecule`` s. In the OpenEye toolkits, partial charges are stored on individual ``OEAtom`` s, and their values are initialized to ``0.0``. In the Open Force Field Toolkit, an ``openff.toolkit.topology.Molecule``'s ``partial_charges`` attribute is initialized to ``None`` and can be set to a ``simtk.unit.Quantity``-wrapped numpy array with units of elementary charge. The Open Force Field Toolkit considers an ``OEMol`` where every ``OEAtom`` has a partial charge of ``float('nan')`` to be equivalent to an Open Force Field Toolkit `Molecule`'s ``partial_charges = None``. This assumption is made in both ``to_openeye`` and ``from_openeye``. .. warning :: This API is experimental and subject to change. 
        Parameters
        ----------
        oemol : openeye.oechem.OEMol
            An OpenEye molecule
        allow_undefined_stereo : bool, default=False
            If false, raises an exception if oemol contains undefined stereochemistry.
        _cls : class
            Molecule constructor

        Returns
        -------
        molecule : openff.toolkit.topology.Molecule
            An OpenFF molecule

        Examples
        --------

        Create a Molecule from an OpenEye OEMol

        >>> from openeye import oechem
        >>> from openff.toolkit.tests.utils import get_data_file_path
        >>> ifs = oechem.oemolistream(get_data_file_path('systems/monomers/ethanol.mol2'))
        >>> oemols = list(ifs.GetOEGraphMols())

        >>> toolkit_wrapper = OpenEyeToolkitWrapper()
        >>> molecule = toolkit_wrapper.from_openeye(oemols[0])

        """
        import math

        from openeye import oechem

        # Work on a copy so the caller's OEMol is never mutated.
        oemol = oechem.OEMol(oemol)

        # Add explicit hydrogens if they're implicit
        if oechem.OEHasImplicitHydrogens(oemol):
            oechem.OEAddExplicitHydrogens(oemol)

        # TODO: Is there any risk to perceiving aromaticity here instead of later?
        oechem.OEAssignAromaticFlags(oemol, oechem.OEAroModel_MDL)

        oechem.OEPerceiveChiral(oemol)

        # Check that all stereo is specified.
        # Potentially better OE stereo check: OEFlipper
        # https://docs.eyesopen.com/toolkits/python/omegatk/OEConfGenFunctions/OEFlipper.html
        unspec_chiral = False
        unspec_db = False
        problematic_atoms = list()
        problematic_bonds = list()

        for oeatom in oemol.GetAtoms():
            if oeatom.IsChiral():
                if not (oeatom.HasStereoSpecified()):
                    unspec_chiral = True
                    problematic_atoms.append(oeatom)
        for oebond in oemol.GetBonds():
            if oebond.IsChiral():
                if not (oebond.HasStereoSpecified()):
                    unspec_db = True
                    problematic_bonds.append(oebond)
        if unspec_chiral or unspec_db:
            # Build a human-readable description of every offending atom/bond
            # before warning or raising.

            def oeatom_to_str(oeatom):
                # One-line summary of an OEAtom for the error message.
                return "atomic num: {}, name: {}, idx: {}, aromatic: {}, chiral: {}".format(
                    oeatom.GetAtomicNum(),
                    oeatom.GetName(),
                    oeatom.GetIdx(),
                    oeatom.IsAromatic(),
                    oeatom.IsChiral(),
                )

            def oebond_to_str(oebond):
                # One-line summary of an OEBond for the error message.
                return "order: {}, chiral: {}".format(
                    oebond.GetOrder(), oebond.IsChiral()
                )

            def describe_oeatom(oeatom):
                # Describe an atom together with all of its bonds/neighbors.
                description = "Atom {} with bonds:".format(oeatom_to_str(oeatom))
                for oebond in oeatom.GetBonds():
                    description += "\nbond {} to atom {}".format(
                        oebond_to_str(oebond), oeatom_to_str(oebond.GetNbr(oeatom))
                    )
                return description

            msg = (
                "OEMol has unspecified stereochemistry. "
                "oemol.GetTitle(): {}\n".format(oemol.GetTitle())
            )
            if len(problematic_atoms) != 0:
                msg += "Problematic atoms are:\n"
                for problematic_atom in problematic_atoms:
                    msg += describe_oeatom(problematic_atom) + "\n"
            if len(problematic_bonds) != 0:
                msg += "Problematic bonds are: {}\n".format(problematic_bonds)
            if allow_undefined_stereo:
                # Downgrade to a logged warning when the caller opted in.
                msg = "Warning (not error because allow_undefined_stereo=True): " + msg
                logger.warning(msg)
            else:
                msg = "Unable to make OFFMol from OEMol: " + msg
                raise UndefinedStereochemistryError(msg)

        if _cls is None:
            from openff.toolkit.topology.molecule import Molecule

            _cls = Molecule

        molecule = _cls()
        molecule.name = oemol.GetTitle()

        # Copy any attached SD tag information
        for dp in oechem.OEGetSDDataPairs(oemol):
            molecule._properties[dp.GetTag()] = dp.GetValue()

        map_atoms = dict()  # {oemol_idx: molecule_idx}
        atom_mapping = {}
        for oeatom in oemol.GetAtoms():
            oe_idx = oeatom.GetIdx()
            map_id = oeatom.GetMapIdx()
            atomic_number = oeatom.GetAtomicNum()
            formal_charge = oeatom.GetFormalCharge() * unit.elementary_charge
            is_aromatic = oeatom.IsAromatic()
            stereochemistry = OpenEyeToolkitWrapper._openeye_cip_atom_stereochemistry(
                oemol, oeatom
            )
            # stereochemistry = self._openeye_cip_atom_stereochemistry(oemol, oeatom)
            name = ""
            if oeatom.HasData("name"):
                name = oeatom.GetData("name")
            atom_index = molecule._add_atom(
                atomic_number,
                formal_charge,
                is_aromatic,
                stereochemistry=stereochemistry,
                name=name,
            )
            map_atoms[oe_idx] = atom_index  # store for mapping oeatom to molecule atom indices below
            atom_mapping[atom_index] = map_id

        # If we have a full / partial atom map add it to the molecule.
        # A map index of 0 indicates no mapping; only atoms with a non-zero
        # map index are stored in the "atom_map" property.
        if {*atom_mapping.values()} != {0}:
            molecule._properties["atom_map"] = {
                idx: map_idx for idx, map_idx in atom_mapping.items() if map_idx != 0
            }

        for oebond in oemol.GetBonds():
            atom1_index = map_atoms[oebond.GetBgnIdx()]
            atom2_index = map_atoms[oebond.GetEndIdx()]
            bond_order = oebond.GetOrder()
            is_aromatic = oebond.IsAromatic()
            stereochemistry = OpenEyeToolkitWrapper._openeye_cip_bond_stereochemistry(
                oemol, oebond
            )
            if oebond.HasData("fractional_bond_order"):
                fractional_bond_order = oebond.GetData("fractional_bond_order")
            else:
                fractional_bond_order = None

            molecule._add_bond(
                atom1_index,
                atom2_index,
                bond_order,
                is_aromatic=is_aromatic,
                stereochemistry=stereochemistry,
                fractional_bond_order=fractional_bond_order,
            )

        # TODO: Copy conformations, if present
        # TODO: Come up with some scheme to know when to import coordinates
        # From SMILES: no
        # From MOL2: maybe
        # From other: maybe
        if hasattr(oemol, "GetConfs"):
            for conf in oemol.GetConfs():
                n_atoms = molecule.n_atoms
                positions = unit.Quantity(
                    np.zeros(shape=[n_atoms, 3], dtype=np.float64), unit.angstrom
                )
                for oe_id in conf.GetCoords().keys():
                    off_atom_coords = unit.Quantity(
                        conf.GetCoords()[oe_id], unit.angstrom
                    )
                    off_atom_index = map_atoms[oe_id]
                    positions[off_atom_index, :] = off_atom_coords
                # Skip all-zero coordinate sets (placeholder conformers) for
                # multi-atom molecules.
                if (positions == 0 * unit.angstrom).all() and n_atoms > 1:
                    continue
                molecule._add_conformer(positions)

        # Copy partial charges, if present
        partial_charges = unit.Quantity(
            np.zeros(shape=molecule.n_atoms, dtype=np.float64),
            unit=unit.elementary_charge,
        )

        # If all OEAtoms have a partial charge of NaN, then the OFFMol should
        # have its partial_charges attribute set to None
        any_partial_charge_is_not_nan = False
        for oe_atom in oemol.GetAtoms():
            oe_idx = oe_atom.GetIdx()
            off_idx = map_atoms[oe_idx]
            unitless_charge = oe_atom.GetPartialCharge()
            if not math.isnan(unitless_charge):
                any_partial_charge_is_not_nan = True
                # break
            charge = unitless_charge * unit.elementary_charge
            partial_charges[off_idx] = charge

        if any_partial_charge_is_not_nan:
            molecule.partial_charges = partial_charges
        else:
            molecule.partial_charges = None

        return molecule

    @staticmethod
    def to_openeye(molecule, aromaticity_model=DEFAULT_AROMATICITY_MODEL):
        """
        Create an OpenEye molecule using the specified aromaticity model

        ``OEAtom`` s have a different set of allowed value for partial
        charges than ``openff.toolkit.topology.Molecule``\ s. In the OpenEye toolkits,
        partial charges are stored on individual ``OEAtom``\ s, and their values are
        initialized to ``0.0``. In the Open Force Field Toolkit, an
        ``openff.toolkit.topology.Molecule``'s ``partial_charges`` attribute is
        initialized to ``None`` and can be set to a ``simtk.unit.Quantity``-wrapped
        numpy array with units of elementary charge. The Open Force Field Toolkit
        considers an ``OEMol`` where every ``OEAtom`` has a partial charge of
        ``float('nan')`` to be equivalent to an Open Force Field Toolkit ``Molecule``'s
        ``partial_charges = None``. This assumption is made in both ``to_openeye``
        and ``from_openeye``.

        .. todo ::

           * Should the aromaticity model be specified in some other way?

        .. warning :: This API is experimental and subject to change.

        Parameters
        ----------
        molecule : openff.toolkit.topology.molecule.Molecule object
            The molecule to convert to an OEMol
        aromaticity_model : str, optional, default=DEFAULT_AROMATICITY_MODEL
            The aromaticity model to use

        Returns
        -------
        oemol : openeye.oechem.OEMol
            An OpenEye molecule

        Examples
        --------

        Create an OpenEye molecule from a Molecule

        >>> from openff.toolkit.topology import Molecule
        >>> toolkit_wrapper = OpenEyeToolkitWrapper()
        >>> molecule = Molecule.from_smiles('CC')
        >>> oemol = toolkit_wrapper.to_openeye(molecule)

        """
        from openeye import oechem

        # The aromaticity model is given by name and resolved against the
        # oechem module's constants of the same name.
        if hasattr(oechem, aromaticity_model):
            oe_aro_model = getattr(oechem, aromaticity_model)
        else:
            raise ValueError(
                "Error: provided aromaticity model not recognized by oechem."
            )

        oemol = oechem.OEMol()
        # if not(molecule.name is None):
        oemol.SetTitle(molecule.name)
        map_atoms = {}  # {off_idx : oe_idx}
        # Add atoms
        oemol_atoms = list()  # list of corresponding oemol atoms
        for atom in molecule.atoms:
            oeatom = oemol.NewAtom(atom.atomic_number)
            oeatom.SetFormalCharge(
                atom.formal_charge.value_in_unit(unit.elementary_charge)
            )  # simtk.unit.Quantity(1, unit.elementary_charge)
            # TODO: Do we want to provide _any_ pathway for Atom.is_aromatic to influence the OEMol?
            # oeatom.SetAromatic(atom.is_aromatic)
            oeatom.SetData("name", atom.name)
            # NaN marks "no charge assigned"; see the class-level convention
            # that an all-NaN OEMol corresponds to partial_charges = None.
            oeatom.SetPartialCharge(float("nan"))
            oemol_atoms.append(oeatom)
            map_atoms[atom.molecule_atom_index] = oeatom.GetIdx()

        # Add bonds
        oemol_bonds = list()  # list of corresponding oemol bonds
        for bond in molecule.bonds:
            # atom1_index = molecule.atoms.index(bond.atom1)
            # atom2_index = molecule.atoms.index(bond.atom2)
            atom1_index = bond.atom1_index
            atom2_index = bond.atom2_index
            oebond = oemol.NewBond(oemol_atoms[atom1_index], oemol_atoms[atom2_index])
            oebond.SetOrder(bond.bond_order)
            # TODO: Do we want to provide _any_ pathway for Bond.is_aromatic to influence the OEMol?
            # oebond.SetAromatic(bond.is_aromatic)
            if not (bond.fractional_bond_order is None):
                oebond.SetData("fractional_bond_order", bond.fractional_bond_order)
            oemol_bonds.append(oebond)

        oechem.OEAssignAromaticFlags(oemol, oe_aro_model)

        # Set atom stereochemistry now that all connectivity is in place
        for atom, oeatom in zip(molecule.atoms, oemol_atoms):
            if not atom.stereochemistry:
                continue

            # Set arbitrary initial stereochemistry
            neighs = [n for n in oeatom.GetAtoms()]
            oeatom.SetStereo(
                neighs, oechem.OEAtomStereo_Tetra, oechem.OEAtomStereo_Right
            )

            # Flip chirality if stereochemistry is incorrect
            oeatom_stereochemistry = (
                OpenEyeToolkitWrapper._openeye_cip_atom_stereochemistry(oemol, oeatom)
            )
            if oeatom_stereochemistry != atom.stereochemistry:
                # Flip the stereochemistry
                oeatom.SetStereo(
                    neighs, oechem.OEAtomStereo_Tetra, oechem.OEAtomStereo_Left
                )
                # Verify it matches now as a sanity check
                oeatom_stereochemistry = (
                    OpenEyeToolkitWrapper._openeye_cip_atom_stereochemistry(
                        oemol, oeatom
                    )
                )
                if oeatom_stereochemistry != atom.stereochemistry:
                    raise Exception(
                        "Programming error: OpenEye atom stereochemistry assumptions failed."
                    )

        # Set bond stereochemistry
        for bond, oebond in zip(molecule.bonds, oemol_bonds):
            if not bond.stereochemistry:
                continue

            atom1_index = bond.molecule.atoms.index(bond.atom1)
            atom2_index = bond.molecule.atoms.index(bond.atom2)
            # Set arbitrary initial stereochemistry using one neighbor on each
            # side of the double bond; the CIP label is then checked and the
            # bond is flipped to Trans if the arbitrary Cis guess was wrong.
            oeatom1, oeatom2 = oemol_atoms[atom1_index], oemol_atoms[atom2_index]
            oeatom1_neighbor = [n for n in oeatom1.GetAtoms() if not n == oeatom2][0]
            oeatom2_neighbor = [n for n in oeatom2.GetAtoms() if not n == oeatom1][0]
            # oebond.SetStereo([oeatom1, oeatom2], oechem.OEBondStereo_CisTrans, oechem.OEBondStereo_Cis)
            oebond.SetStereo(
                [oeatom1_neighbor, oeatom2_neighbor],
                oechem.OEBondStereo_CisTrans,
                oechem.OEBondStereo_Cis,
            )

            # Flip stereochemistry if incorrect
            oebond_stereochemistry = (
                OpenEyeToolkitWrapper._openeye_cip_bond_stereochemistry(oemol, oebond)
            )
            if oebond_stereochemistry != bond.stereochemistry:
                # Flip the stereochemistry
                oebond.SetStereo(
                    [oeatom1_neighbor, oeatom2_neighbor],
                    oechem.OEBondStereo_CisTrans,
                    oechem.OEBondStereo_Trans,
                )
                # Verify it matches now as a sanity check
                oebond_stereochemistry = (
                    OpenEyeToolkitWrapper._openeye_cip_bond_stereochemistry(
                        oemol, oebond
                    )
                )
                if oebond_stereochemistry != bond.stereochemistry:
                    raise Exception(
                        "Programming error: OpenEye bond stereochemistry assumptions failed."
                    )

        # Retain conformations, if present
        if molecule.n_conformers != 0:
            oemol.DeleteConfs()
            for conf in molecule._conformers:
                # OE needs a 1 x (3*n_Atoms) double array as input
                flat_coords = np.zeros(shape=oemol.NumAtoms() * 3, dtype=np.float64)
                for index, oe_idx in map_atoms.items():
                    (x, y, z) = conf[index, :] / unit.angstrom
                    flat_coords[(3 * oe_idx)] = x
                    flat_coords[(3 * oe_idx) + 1] = y
                    flat_coords[(3 * oe_idx) + 2] = z

                oecoords = oechem.OEFloatArray(flat_coords)
                oemol.NewConf(oecoords)
        # Retain charges, if present. All atoms are initialized above with a partial charge of NaN.
        if molecule._partial_charges is not None:
            oe_indexed_charges = np.zeros(shape=molecule.n_atoms, dtype=np.float64)
            for off_idx, charge in enumerate(molecule._partial_charges):
                oe_idx = map_atoms[off_idx]
                charge_unitless = charge / unit.elementary_charge
                oe_indexed_charges[oe_idx] = charge_unitless
            # TODO: This loop below fails if we try to use an "enumerate"-style loop.
            #  It's worth investigating whether we make this assumption elsewhere in the codebase, since
            #  the OE docs may indicate that this sort of usage is a very bad thing to do.
            #  https://docs.eyesopen.com/toolkits/python/oechemtk/atombondindices.html#indices-for-molecule-lookup-considered-harmful
            # for oe_idx, oe_atom in enumerate(oemol.GetAtoms()):
            for oe_atom in oemol.GetAtoms():
                oe_idx = oe_atom.GetIdx()
                oe_atom.SetPartialCharge(oe_indexed_charges[oe_idx])

        # Retain properties, if present
        for key, value in molecule.properties.items():
            oechem.OESetSDData(oemol, str(key), str(value))

        # Clean Up phase
        # The only feature of a molecule that wasn't perceived above seemed to be ring connectivity,
        # better to run it here than for someone to inquire about ring sizes and get 0 when it
        # shouldn't be
        oechem.OEFindRingAtomsAndBonds(oemol)

        return oemol

    def to_smiles(self, molecule, isomeric=True, explicit_hydrogens=True, mapped=False):
        """
        Uses the OpenEye toolkit to convert a Molecule into a SMILES string.
        A partially mapped smiles can also be generated for atoms of interest by supplying
        an `atom_map` to the properties dictionary.

        Parameters
        ----------
        molecule : An openff.toolkit.topology.Molecule
            The molecule to convert into a SMILES.
        isomeric: bool optional, default= True
            return an isomeric smiles
        explicit_hydrogens: bool optional, default=True
            return a smiles string containing all hydrogens explicitly
        mapped: bool optional, default=False
            return a explicit hydrogen mapped smiles, the atoms to be mapped can be controlled by
            supplying an atom map into the properties dictionary.
If no mapping is passed all atoms will be mapped in order, else an atom map dictionary from the current atom index to the map id should be supplied with no duplicates. The map ids (values) should start from 0 or 1. Returns ------- smiles : str The SMILES of the input molecule. """ from openeye import oechem oemol = self.to_openeye(molecule) # this sets up the default settings following the old DEFAULT flag # more information on flags can be found here # <https://docs.eyesopen.com/toolkits/python/oechemtk/OEChemConstants/OESMILESFlag.html#OEChem::OESMILESFlag> smiles_options = ( oechem.OESMILESFlag_Canonical | oechem.OESMILESFlag_Isotopes | oechem.OESMILESFlag_RGroups ) # check if we want an isomeric smiles if isomeric: # add the atom and bond stereo flags smiles_options |= ( oechem.OESMILESFlag_AtomStereo | oechem.OESMILESFlag_BondStereo ) if explicit_hydrogens: # add the hydrogen flag smiles_options |= oechem.OESMILESFlag_Hydrogens if mapped: assert explicit_hydrogens is True, ( "Mapped smiles require all hydrogens and " "stereochemsitry to be defined to retain order" ) # if we only want to map specific atoms check for an atom map atom_map = molecule._properties.get("atom_map", None) if atom_map is not None: # make sure there are no repeated indices map_ids = set(atom_map.values()) if len(map_ids) < len(atom_map): atom_map = None elif 0 in atom_map.values(): # we need to increment the map index for atom, map in atom_map.items(): atom_map[atom] = map + 1 if atom_map is None: # now we need to add the atom map to the atoms for oeatom in oemol.GetAtoms(): oeatom.SetMapIdx(oeatom.GetIdx() + 1) else: for atom in oemol.GetAtoms(): try: # try to set the atom map map_idx = atom_map[atom.GetIdx()] atom.SetMapIdx(map_idx) except KeyError: continue smiles_options |= oechem.OESMILESFlag_AtomMaps smiles = oechem.OECreateSmiString(oemol, smiles_options) return smiles def to_inchi(self, molecule, fixed_hydrogens=False): """ Create an InChI string for the molecule using the RDKit 
Toolkit. InChI is a standardised representation that does not capture tautomers unless specified using the fixed hydrogen layer. For information on InChi see here https://iupac.org/who-we-are/divisions/division-details/inchi/ Parameters ---------- molecule : An openff.toolkit.topology.Molecule The molecule to convert into a SMILES. fixed_hydrogens: bool, default=False If a fixed hydrogen layer should be added to the InChI, if `True` this will produce a non standard specific InChI string of the molecule. Returns -------- inchi: str The InChI string of the molecule. """ from openeye import oechem oemol = self.to_openeye(molecule) if fixed_hydrogens: opts = oechem.OEInChIOptions() opts.SetFixedHLayer(True) inchi = oechem.OEMolToInChI(oemol) else: inchi = oechem.OEMolToSTDInChI(oemol) return inchi def to_inchikey(self, molecule, fixed_hydrogens=False): """ Create an InChIKey for the molecule using the RDKit Toolkit. InChIKey is a standardised representation that does not capture tautomers unless specified using the fixed hydrogen layer. For information on InChi see here https://iupac.org/who-we-are/divisions/division-details/inchi/ Parameters ---------- molecule : An openff.toolkit.topology.Molecule The molecule to convert into a SMILES. fixed_hydrogens: bool, default=False If a fixed hydrogen layer should be added to the InChI, if `True` this will produce a non standard specific InChI string of the molecule. Returns -------- inchi_key: str The InChIKey representation of the molecule. """ from openeye import oechem oemol = self.to_openeye(molecule) if fixed_hydrogens: opts = oechem.OEInChIOptions() opts.SetFixedHLayer(True) inchi_key = oechem.OEMolToInChIKey(oemol) else: inchi_key = oechem.OEMolToSTDInChIKey(oemol) return inchi_key def to_iupac(self, molecule): """Generate IUPAC name from Molecule Parameters ---------- molecule : An openff.toolkit.topology.Molecule The molecule to convert into a SMILES. 
Returns ------- iupac_name : str IUPAC name of the molecule Examples -------- >>> from openff.toolkit.topology import Molecule >>> from openff.toolkit.utils import get_data_file_path >>> sdf_filepath = get_data_file_path('molecules/ethanol.sdf') >>> molecule = Molecule(sdf_filepath) >>> toolkit = OpenEyeToolkitWrapper() >>> iupac_name = toolkit.to_iupac(molecule) """ from openeye import oeiupac oemol = self.to_openeye(molecule) return oeiupac.OECreateIUPACName(oemol) def canonical_order_atoms(self, molecule): """ Canonical order the atoms in the molecule using the OpenEye toolkit. Parameters ---------- molecule: openff.toolkit.topology.Molecule The input molecule Returns ------- molecule : openff.toolkit.topology.Molecule The input molecule, with canonically-indexed atoms and bonds. """ from openeye import oechem oemol = self.to_openeye(molecule) oechem.OECanonicalOrderAtoms(oemol) oechem.OECanonicalOrderBonds(oemol) # reorder the iterator vatm = [] for atom in oemol.GetAtoms(): if atom.GetAtomicNum() != oechem.OEElemNo_H: vatm.append(atom) oemol.OrderAtoms(vatm) vbnd = [] for bond in oemol.GetBonds(): if ( bond.GetBgn().GetAtomicNum() != oechem.OEElemNo_H and bond.GetEnd().GetAtomicNum() != oechem.OEElemNo_H ): vbnd.append(bond) oemol.OrderBonds(vbnd) oemol.Sweep() for bond in oemol.GetBonds(): if bond.GetBgnIdx() > bond.GetEndIdx(): bond.SwapEnds() return self.from_openeye( oemol, allow_undefined_stereo=True, _cls=molecule.__class__ ) def from_smiles( self, smiles, hydrogens_are_explicit=False, allow_undefined_stereo=False, _cls=None, ): """ Create a Molecule from a SMILES string using the OpenEye toolkit. .. warning :: This API is experimental and subject to change. Parameters ---------- smiles : str The SMILES string to turn into a molecule hydrogens_are_explicit : bool, default = False If False, OE will perform hydrogen addition using OEAddExplicitHydrogens allow_undefined_stereo : bool, default=False Whether to accept SMILES with undefined stereochemistry. 
If False, an exception will be raised if a SMILES with undefined stereochemistry is passed into this function. _cls : class Molecule constructor Returns ------- molecule : openff.toolkit.topology.Molecule An OpenFF style molecule. """ from openeye import oechem oemol = oechem.OEGraphMol() oechem.OESmilesToMol(oemol, smiles) if not (hydrogens_are_explicit): result = oechem.OEAddExplicitHydrogens(oemol) if not result: raise ValueError( "Addition of explicit hydrogens failed in from_openeye" ) elif hydrogens_are_explicit and oechem.OEHasImplicitHydrogens(oemol): raise ValueError( f"'hydrogens_are_explicit' was specified as True, but OpenEye Toolkit interpreted " f"SMILES '{smiles}' as having implicit hydrogen. If this SMILES is intended to " f"express all explicit hydrogens in the molecule, then you should construct the " f"desired molecule as an OEMol (where oechem.OEHasImplicitHydrogens(oemol) returns " f"False), and then use Molecule.from_openeye() to create the desired OFFMol." ) # Set partial charges to None, since they couldn't have been stored in a SMILES for atom in oemol.GetAtoms(): atom.SetPartialCharge(float("nan")) molecule = self.from_openeye( oemol, _cls=_cls, allow_undefined_stereo=allow_undefined_stereo ) return molecule def from_inchi(self, inchi, allow_undefined_stereo=False, _cls=None): """ Construct a Molecule from a InChI representation Parameters ---------- inchi : str The InChI representation of the molecule. allow_undefined_stereo : bool, default=False Whether to accept InChI with undefined stereochemistry. If False, an exception will be raised if a InChI with undefined stereochemistry is passed into this function. 
        _cls : class
            Molecule constructor

        Returns
        -------
        molecule : openff.toolkit.topology.Molecule
        """
        from openeye import oechem

        # This calls the same functions as OESmilesToMol
        oemol = oechem.OEGraphMol()
        oechem.OEInChIToMol(oemol, inchi)

        # try and catch InChI parsing fails
        # if there are no atoms don't build the molecule
        if oemol.NumAtoms() == 0:
            raise RuntimeError(
                "There was an issue parsing the InChI string, please check and try again."
            )

        molecule = self.from_openeye(
            oemol, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls
        )

        return molecule

    def from_iupac(self, iupac_name, allow_undefined_stereo=False, _cls=None, **kwargs):
        """
        Construct a Molecule from an IUPAC name

        Parameters
        ----------
        iupac_name : str
            The IUPAC or common name of the molecule.
        allow_undefined_stereo : bool, default=False
            Whether to accept a molecule name with undefined stereochemistry. If False,
            an exception will be raised if a molecule name with undefined stereochemistry
            is passed into this function.
        _cls : class
            Molecule constructor

        Returns
        -------
        molecule : openff.toolkit.topology.Molecule
        """
        from openeye import oechem, oeiupac

        oemol = oechem.OEMol()
        parsing_result = oeiupac.OEParseIUPACName(oemol, iupac_name)
        if not parsing_result:
            raise InvalidIUPACNameError(
                f"OpenEye failed to parse {iupac_name} as a IUPAC name"
            )
        oechem.OETriposAtomNames(oemol)
        result = oechem.OEAddExplicitHydrogens(oemol)
        if not result:
            raise Exception("Addition of explicit hydrogens failed in from_iupac")

        molecule = self.from_openeye(
            oemol, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls, **kwargs
        )

        return molecule

    def generate_conformers(
        self, molecule, n_conformers=1, rms_cutoff=None, clear_existing=True
    ):
        """
        Generate molecule conformers using OpenEye Omega.

        .. warning :: This API is experimental and subject to change.

        .. todo ::

           * which parameters should we expose? (or can we implement a general system with \*\*kwargs?)
           * will the coordinates be returned in the OpenFF Molecule's own indexing system?
             Or is there a chance that they'll get reindexed when we convert the input into an OEmol?

        Parameters
        ----------
        molecule : a :class:`Molecule`
            The molecule to generate conformers for.
        n_conformers : int, default=1
            The maximum number of conformers to generate.
        rms_cutoff : simtk.Quantity-wrapped float, in units of distance, optional, default=None
            The minimum RMS value at which two conformers are considered redundant and one
            is deleted. If None, the cutoff is set to 1 Angstrom
        clear_existing : bool, default=True
            Whether to overwrite existing conformers for the molecule

        """
        from openeye import oeomega

        oemol = self.to_openeye(molecule)
        omega = oeomega.OEOmega()
        omega.SetMaxConfs(n_conformers)
        omega.SetCanonOrder(False)
        omega.SetSampleHydrogens(True)
        omega.SetEnergyWindow(15.0)  # unit?
        if rms_cutoff is None:
            # Default RMS cutoff of 1 Angstrom (see docstring).
            omega.SetRMSThreshold(1.0)
        else:
            omega.SetRMSThreshold(rms_cutoff.value_in_unit(unit.angstrom))
        # Don't generate random stereoisomer if not specified
        omega.SetStrictStereo(True)
        status = omega(oemol)

        if status is False:
            # Retry once with relaxed stereo before giving up.
            omega.SetStrictStereo(False)
            new_status = omega(oemol)
            if new_status is False:
                raise Exception("OpenEye Omega conformer generation failed")

        molecule2 = self.from_openeye(
            oemol, allow_undefined_stereo=True, _cls=molecule.__class__
        )

        if clear_existing:
            molecule._conformers = list()

        for conformer in molecule2._conformers:
            molecule._add_conformer(conformer)

    def apply_elf_conformer_selection(
        self,
        molecule: "Molecule",
        percentage: float = 2.0,
        limit: int = 10,
    ):
        """Applies the `ELF method
        <https://docs.eyesopen.com/toolkits/python/quacpactk/molchargetheory.html#elf-conformer-selection>`_
        to select a set of diverse conformers which have minimal electrostatically
        strongly interacting functional groups from a molecules conformers.

        Notes
        -----
        * The input molecule should have a large set of conformers already
          generated to select the ELF conformers from.
        * The selected conformers will be retained in the `molecule.conformers` list
          while unselected conformers will be discarded.

        See Also
        --------
        RDKitToolkitWrapper.apply_elf_conformer_selection

        Parameters
        ----------
        molecule
            The molecule which contains the set of conformers to select from.
        percentage
            The percentage of conformers with the lowest electrostatic interaction
            energies to greedily select from.
        limit
            The maximum number of conformers to select.
        """
        from openeye import oechem, oequacpac

        # Nothing to select from.
        if molecule.n_conformers == 0:
            return

        oe_molecule = molecule.to_openeye()

        # Select a subset of the OMEGA generated conformers using the ELF10 method.
        oe_elf_options = oequacpac.OEELFOptions()
        oe_elf_options.SetElfLimit(limit)
        oe_elf_options.SetPercent(percentage)

        oe_elf = oequacpac.OEELF(oe_elf_options)

        # Capture OpenEye's error/warning stream so non-fatal warnings can be
        # re-routed to our logger instead of stderr.
        output_stream = oechem.oeosstream()

        oechem.OEThrow.SetOutputStream(output_stream)
        oechem.OEThrow.Clear()

        status = oe_elf.Select(oe_molecule)

        oechem.OEThrow.SetOutputStream(oechem.oeerr)

        output_string = output_stream.str().decode("UTF-8")
        output_string = output_string.replace("Warning: ", "")
        output_string = re.sub("^: +", "", output_string, flags=re.MULTILINE)
        output_string = re.sub("\n$", "", output_string)

        # Check to make sure the call to OE was successful, and re-route any
        # non-fatal warnings to the correct logger.
        if not status:
            raise RuntimeError("\n" + output_string)
        elif len(output_string) > 0:
            logger.warning(output_string)

        # Extract and store the ELF conformers on the input molecule.
        conformers = []

        for oe_conformer in oe_molecule.GetConfs():
            conformer = np.zeros((oe_molecule.NumAtoms(), 3))

            for atom_index, coordinates in oe_conformer.GetCoords().items():
                conformer[atom_index, :] = coordinates

            conformers.append(conformer * unit.angstrom)

        molecule._conformers = conformers

    def assign_partial_charges(
        self,
        molecule,
        partial_charge_method=None,
        use_conformers=None,
        strict_n_conformers=False,
        _cls=None,
    ):
        """
        Compute partial charges with OpenEye quacpac, and assign
        the new values to the partial_charges attribute.

        .. warning :: This API is experimental and subject to change.

        .. todo ::

           * Should the default be ELF?
           * Can we expose more charge models?

        Parameters
        ----------
        molecule : openff.toolkit.topology.Molecule
            Molecule for which partial charges are to be computed
        partial_charge_method : str, optional, default=None
            The charge model to use. One of ['amberff94', 'mmff', 'mmff94', `am1-mulliken`,
            'am1bcc', 'am1bccnosymspt', 'am1bccelf10']
            If None, 'am1-mulliken' will be used.
        use_conformers : iterable of simtk.unit.Quantity-wrapped numpy arrays, each with shape (n_atoms, 3) and dimension of distance. Optional, default = None
            Coordinates to use for partial charge calculation. If None, an appropriate number
            of conformers will be generated.
        strict_n_conformers : bool, default=False
            Whether to raise an exception if an invalid number of conformers is provided for
            the given charge method.
            If this is False and an invalid number of conformers is found, a warning will be raised.
        _cls : class
            Molecule constructor

        Raises
        ------
        ChargeMethodUnavailableError if the requested charge method can not be handled by this toolkit

        ChargeCalculationError if the charge method is supported by this toolkit, but fails
        """
        import numpy as np
        from openeye import oechem, oequacpac

        # NOTE(review): this Molecule import is shadowed by the conditional
        # `_cls` import below — possibly redundant.
        from openff.toolkit.topology import Molecule

        # Per-method conformer requirements: min/max accepted and the number
        # recommended (generated when the caller supplies none).
        SUPPORTED_CHARGE_METHODS = {
            "am1bcc": {
                "oe_charge_method": oequacpac.OEAM1BCCCharges,
                "min_confs": 1,
                "max_confs": 1,
                "rec_confs": 1,
            },
            "am1-mulliken": {
                "oe_charge_method": oequacpac.OEAM1Charges,
                "min_confs": 1,
                "max_confs": 1,
                "rec_confs": 1,
            },
            "gasteiger": {
                "oe_charge_method": oequacpac.OEGasteigerCharges,
                "min_confs": 0,
                "max_confs": 0,
                "rec_confs": 0,
            },
            "mmff94": {
                "oe_charge_method": oequacpac.OEMMFF94Charges,
                "min_confs": 0,
                "max_confs": 0,
                "rec_confs": 0,
            },
            "am1bccnosymspt": {
                "oe_charge_method": oequacpac.OEAM1BCCCharges,
                "min_confs": 1,
                "max_confs": 1,
                "rec_confs": 1,
            },
            "am1elf10": {
                "oe_charge_method": oequacpac.OEELFCharges(
                    oequacpac.OEAM1Charges(optimize=True, symmetrize=True), 10
                ),
                "min_confs": 1,
                "max_confs": None,
                "rec_confs": 500,
            },
            "am1bccelf10": {
                "oe_charge_method": oequacpac.OEAM1BCCELF10Charges,
                "min_confs": 1,
                "max_confs": None,
                "rec_confs": 500,
            },
        }

        if partial_charge_method is None:
            partial_charge_method = "am1-mulliken"

        partial_charge_method = partial_charge_method.lower()

        if partial_charge_method not in SUPPORTED_CHARGE_METHODS:
            raise ChargeMethodUnavailableError(
                f"partial_charge_method '{partial_charge_method}' is not available from OpenEyeToolkitWrapper. "
                f"Available charge methods are {list(SUPPORTED_CHARGE_METHODS.keys())} "
            )

        charge_method = SUPPORTED_CHARGE_METHODS[partial_charge_method]

        if _cls is None:
            from openff.toolkit.topology.molecule import Molecule

            _cls = Molecule

        # Make a temporary copy of the molecule, since we'll be messing with its conformers
        mol_copy = _cls(molecule)

        if use_conformers is None:
            if charge_method["rec_confs"] == 0:
                mol_copy._conformers = None
            else:
                self.generate_conformers(
                    mol_copy,
                    n_conformers=charge_method["rec_confs"],
                    rms_cutoff=0.25 * unit.angstrom,
                )
            # TODO: What's a "best practice" RMS cutoff to use here?
        else:
            mol_copy._conformers = None
            for conformer in use_conformers:
                mol_copy._add_conformer(conformer)
            self._check_n_conformers(
                mol_copy,
                partial_charge_method=partial_charge_method,
                min_confs=charge_method["min_confs"],
                max_confs=charge_method["max_confs"],
                strict_n_conformers=strict_n_conformers,
            )

        oemol = mol_copy.to_openeye()

        # Capture OE's error stream so failure text can be inspected and
        # included in raised exceptions.
        errfs = oechem.oeosstream()
        oechem.OEThrow.SetOutputStream(errfs)
        oechem.OEThrow.Clear()

        # The OpenFF toolkit has always supported a version of AM1BCC with no geometry optimization
        # or symmetry correction. So we include this keyword to provide a special configuration of
        # quacpac if requested.
        if partial_charge_method == "am1bccnosymspt":
            optimize = False
            symmetrize = False
            quacpac_status = oequacpac.OEAssignCharges(
                oemol, charge_method["oe_charge_method"](optimize, symmetrize)
            )
        else:
            oe_charge_method = charge_method["oe_charge_method"]

            # Some entries store a class (callable), others an already
            # constructed charge engine (e.g. the OEELFCharges instance).
            if callable(oe_charge_method):
                oe_charge_method = oe_charge_method()

            quacpac_status = oequacpac.OEAssignCharges(oemol, oe_charge_method)

        oechem.OEThrow.SetOutputStream(oechem.oeerr)  # restoring to original state

        # This logic handles errors encountered in #34, which can occur when using ELF10
        # conformer selection
        if not quacpac_status:

            oe_charge_engine = (
                oequacpac.OEAM1Charges
                if partial_charge_method == "am1elf10"
                else oequacpac.OEAM1BCCCharges
            )

            if "SelectElfPop: issue with removing trans COOH conformers" in (
                errfs.str().decode("UTF-8")
            ):
                logger.warning(
                    f"Warning: charge assignment involving ELF10 conformer selection failed due to a known bug (toolkit issue "
                    f"#346). Downgrading to {oe_charge_engine.__name__} charge assignment for this molecule. More information"
                    f"is available at https://github.com/openforcefield/openff-toolkit/issues/346"
                )

                quacpac_status = oequacpac.OEAssignCharges(oemol, oe_charge_engine())

        if quacpac_status is False:
            raise ChargeCalculationError(
                f'Unable to assign charges: {errfs.str().decode("UTF-8")}'
            )

        # Extract and return charges
        # TODO: Make sure atom mapping remains constant
        charges = unit.Quantity(
            np.zeros(shape=oemol.NumAtoms(), dtype=np.float64), unit.elementary_charge
        )
        for oeatom in oemol.GetAtoms():
            index = oeatom.GetIdx()
            charge = oeatom.GetPartialCharge()
            charge = charge * unit.elementary_charge
            charges[index] = charge

        molecule.partial_charges = charges

    def compute_partial_charges_am1bcc(
        self, molecule, use_conformers=None, strict_n_conformers=False
    ):
        """
        Compute AM1BCC partial charges with OpenEye quacpac. This function will attempt to use
        the OEAM1BCCELF10 charge generation method, but may print a warning and fall back to
        normal OEAM1BCC if an error is encountered. This error is known to occur with some
        carboxylic acids, and is under investigation by OpenEye.

        .. warning :: This API is experimental and subject to change.

        Parameters
        ----------
        molecule : Molecule
            Molecule for which partial charges are to be computed
        use_conformers : iterable of simtk.unit.Quantity-wrapped numpy arrays, each with shape (n_atoms, 3) and dimension of distance. Optional, default = None
            Coordinates to use for partial charge calculation. If None, an appropriate number
            of conformers will be generated.
        strict_n_conformers : bool, default=False
            Whether to raise an exception if an invalid number of conformers is provided.
            If this is False and an invalid number of conformers is found, a warning will be
            raised instead of an Exception.

        Returns
        -------
        charges : numpy.array of shape (natoms) of type float
            The partial charges
        """
        import warnings

        # Deprecated entry point: delegate to assign_partial_charges with the
        # ELF10 method and return the result for backward compatibility.
        warnings.warn(
            "compute_partial_charges_am1bcc will be deprecated in an upcoming release. "
            "Use assign_partial_charges(partial_charge_method='am1bccelf10') instead.",
            DeprecationWarning,
        )
        self.assign_partial_charges(
            molecule,
            partial_charge_method="am1bccelf10",
            use_conformers=use_conformers,
            strict_n_conformers=strict_n_conformers,
        )
        return molecule.partial_charges

    def assign_fractional_bond_orders(
        self, molecule, bond_order_model=None, use_conformers=None, _cls=None
    ):
        """
        Update and store list of bond orders this molecule. Bond orders are stored on each
        bond, in the `bond.fractional_bond_order` attribute.

        .. warning :: This API is experimental and subject to change.

        Parameters
        ----------
        molecule : openff.toolkit.topology.molecule Molecule
            The molecule to assign wiberg bond orders to
        bond_order_model : str, optional, default=None
            The charge model to use. One of ['am1-wiberg', 'am1-wiberg-elf10',
            'pm3-wiberg', 'pm3-wiberg-elf10']. If None, 'am1-wiberg' will be used.
    def assign_fractional_bond_orders(
        self, molecule, bond_order_model=None, use_conformers=None, _cls=None
    ):
        """
        Update and store list of bond orders this molecule. Bond orders are stored on each
        bond, in the `bond.fractional_bond_order` attribute.

        .. warning :: This API is experimental and subject to change.

        Parameters
        ----------
        molecule : openff.toolkit.topology.molecule Molecule
            The molecule to assign wiberg bond orders to
        bond_order_model : str, optional, default=None
            The charge model to use. One of ['am1-wiberg', 'am1-wiberg-elf10',
            'pm3-wiberg', 'pm3-wiberg-elf10']. If None, 'am1-wiberg' will be used.
        use_conformers : iterable of simtk.unit.Quantity(np.array) with shape (n_atoms, 3) and dimension of distance, optional, default=None
            The conformers to use for fractional bond order calculation. If None, an
            appropriate number of conformers will be generated by an available
            ToolkitWrapper. If the chosen ``bond_order_model`` is an ELF variant, the ELF
            conformer selection method will be applied to the provided conformers.
        _cls : class
            Molecule constructor
        """
        from openeye import oechem, oequacpac

        if _cls is None:
            from openff.toolkit.topology.molecule import Molecule

            _cls = Molecule

        # Make a copy since we'll be messing with this molecule's conformers
        temp_mol = _cls(molecule)

        if bond_order_model is None:
            bond_order_model = "am1-wiberg"

        is_elf_method = bond_order_model in ["am1-wiberg-elf10", "pm3-wiberg-elf10"]

        if use_conformers is None:
            # ELF variants need a dense conformer pool to select from; the
            # plain methods only use a single conformer.
            temp_mol.generate_conformers(
                n_conformers=1 if not is_elf_method else 500,
                # 0.05 is the recommended RMS when generating a 'Dense' amount of
                # conformers using Omega: https://docs.eyesopen.com/toolkits/python/
                # omegatk/OEConfGenConstants/OEFragBuilderMode.html.
                rms_cutoff=None if not is_elf_method else 0.05 * unit.angstrom,
            )
        else:
            # Replace any existing conformers with the user-provided ones.
            temp_mol._conformers = None
            for conformer in use_conformers:
                temp_mol._add_conformer(conformer)
        if temp_mol.n_conformers == 0:
            raise Exception(
                "No conformers present in molecule submitted for fractional bond order calculation. Consider "
                "loading the molecule from a file with geometry already present or running "
                "molecule.generate_conformers() before calling molecule.compute_wiberg_bond_orders()"
            )

        if is_elf_method:
            # Apply the ELF10 conformer selection method.
            temp_mol.apply_elf_conformer_selection()

        # Set the options to use when computing the WBOs. This is based on example at
        # https://docs.eyesopen.com/toolkits/python/quacpactk/examples_summary_wibergbondorders.html
        am1 = oequacpac.OEAM1()

        am1results = oequacpac.OEAM1Results()
        am1options = am1.GetOptions()
        if bond_order_model.startswith("am1-wiberg"):
            am1options.SetSemiMethod(oequacpac.OEMethodType_AM1)
        elif bond_order_model.startswith("pm3-wiberg"):
            # TODO: Make sure that modifying am1options actually works
            am1options.SetSemiMethod(oequacpac.OEMethodType_PM3)
        else:
            raise ValueError(
                f"Bond order model '{bond_order_model}' is not supported by "
                f"OpenEyeToolkitWrapper. Supported models are ['am1-wiberg', "
                f"'am1-wiberg-elf10', 'pm3-wiberg', 'pm3-wiberg-elf10']."
            )

        # Convert the conformers into OE friendly objects to make setting them one
        # at a time easier.
        oe_conformers = [
            oechem.OEFloatArray(conformer.value_in_unit(unit.angstrom).flatten())
            for conformer in temp_mol.conformers
        ]

        oemol = self.to_openeye(temp_mol)
        bond_orders = defaultdict(list)

        # Compute WBOs once per conformer; each conformer contributes one
        # bond-order sample per bond.
        for oe_conformer in oe_conformers:

            oemol.DeleteConfs()
            oemol.NewConf(oe_conformer)

            status = am1.CalcAM1(am1results, oemol)

            if status is False:
                raise Exception(
                    "Unable to assign charges (in the process of calculating "
                    "fractional bond orders)"
                )

            for bond in oemol.GetBonds():
                bond_orders[bond.GetIdx()].append(
                    am1results.GetBondOrder(bond.GetBgnIdx(), bond.GetEndIdx())
                )

        # TODO: Will bonds always map back to the same index? Consider doing a
        #       topology mapping.
        # Store the average over conformers on the *original* molecule's bonds.
        for bond_idx, conformer_bond_orders in bond_orders.items():

            # Get bond order
            order = np.mean(conformer_bond_orders)

            mol_bond = molecule._bonds[bond_idx]
            mol_bond.fractional_bond_order = order
tagged_atom_connectivity : tuple of tuples of int, shape n_tagged_bonds x 2 A tuple of tuples, where each inner tuple is a pair of tagged atoms (tag_idx_1, tag_idx_2) which are bonded. The inner tuples are ordered smallest-to-largest, and the tuple of tuples is ordered lexically. So the return value for an improper torsion would be ((1, 2), (2, 3), (2, 4)). Raises ------ SMIRKSParsingError If OpenEye toolkit was unable to parse the provided smirks/tagged smarts """ from openeye import oechem from openff.toolkit.typing.chemistry import SMIRKSParsingError qmol = oechem.OEQMol() status = oechem.OEParseSmarts(qmol, smarts) if not status: raise SMIRKSParsingError( f"OpenEye Toolkit was unable to parse SMIRKS {smarts}" ) unique_tags = set() connections = set() for at1 in qmol.GetAtoms(): if at1.GetMapIdx() == 0: continue unique_tags.add(at1.GetMapIdx()) for at2 in at1.GetAtoms(): if at2.GetMapIdx() == 0: continue cxn_to_add = sorted([at1.GetMapIdx(), at2.GetMapIdx()]) connections.add(tuple(cxn_to_add)) connections = tuple(sorted(list(connections))) unique_tags = tuple(sorted(list(unique_tags))) return tuple(unique_tags), tuple(connections) @staticmethod def _find_smarts_matches( oemol, smarts, aromaticity_model=DEFAULT_AROMATICITY_MODEL ): """Find all sets of atoms in the provided OpenEye molecule that match the provided SMARTS string. Parameters ---------- oemol : openeye.oechem.OEMol or similar oemol to process with the SMIRKS in order to find matches smarts : str SMARTS string with any number of sequentially tagged atoms. If there are N tagged atoms numbered 1..N, the resulting matches will be N-tuples of atoms that match the corresponding tagged atoms. aromaticity_model : str, optional, default=None OpenEye aromaticity model designation as a string, such as ``OEAroModel_MDL``. Molecule is prepared with this aromaticity model prior to querying. 
Returns ------- matches : list of tuples of atoms indices within the ``oemol`` matches[index] is an N-tuple of atom numbers from the ``oemol`` Matches are returned in no guaranteed order. # TODO: What is returned if no matches are found? An empty list, or None? # TODO: Ensure that SMARTS numbers 1, 2, 3... are rendered into order of returnd matches indexed by 0, 1, 2... .. notes :: * Raises ``LicenseError`` if valid OpenEye tools license is not found, rather than causing program to terminate * Raises ``ValueError`` if ``smarts`` query is malformed """ from openeye import oechem from openeye.oechem import OESubSearch # Make a copy of molecule so we don't influence original (probably safer than deepcopy per C Bayly) mol = oechem.OEMol(oemol) # Set up query qmol = oechem.OEQMol() if not oechem.OEParseSmarts(qmol, smarts): raise ValueError(f"Error parsing SMARTS '{smarts}'") # Apply aromaticity model if type(aromaticity_model) == str: # Check if the user has provided a manually-specified aromaticity_model if hasattr(oechem, aromaticity_model): oearomodel = getattr(oechem, aromaticity_model) else: raise ValueError( "Error: provided aromaticity model not recognized by oechem." ) else: raise ValueError("Error: provided aromaticity model must be a string.") # OEPrepareSearch will clobber our desired aromaticity model if we don't sync up mol and qmol ahead of time # Prepare molecule oechem.OEClearAromaticFlags(mol) oechem.OEAssignAromaticFlags(mol, oearomodel) # If aromaticity model was provided, prepare query molecule oechem.OEClearAromaticFlags(qmol) oechem.OEAssignAromaticFlags(qmol, oearomodel) oechem.OEAssignHybridization(mol) oechem.OEAssignHybridization(qmol) # Build list of matches # TODO: The MoleculeImage mapping should preserve ordering of template molecule for equivalent atoms # and speed matching for larger molecules. 
unique = False # We require all matches, not just one of each kind substructure_search = OESubSearch(qmol) substructure_search.SetMaxMatches(0) oechem.OEPrepareSearch(mol, substructure_search) matches = list() for match in substructure_search.Match(mol, unique): # Compile list of atom indices that match the pattern tags atom_indices = dict() for matched_atom in match.GetAtoms(): if matched_atom.pattern.GetMapIdx() != 0: atom_indices[ matched_atom.pattern.GetMapIdx() - 1 ] = matched_atom.target.GetIdx() # Compress into list atom_indices = [atom_indices[index] for index in range(len(atom_indices))] # Convert to tuple matches.append(tuple(atom_indices)) return matches def find_smarts_matches(self, molecule, smarts, aromaticity_model="OEAroModel_MDL"): """ Find all SMARTS matches for the specified molecule, using the specified aromaticity model. .. warning :: This API is experimental and subject to change. Parameters ---------- molecule : openff.toolkit.topology.Molecule The molecule for which all specified SMARTS matches are to be located smarts : str SMARTS string with optional SMIRKS-style atom tagging aromaticity_model : str, optional, default='OEAroModel_MDL' Molecule is prepared with this aromaticity model prior to querying. .. note :: Currently, the only supported ``aromaticity_model`` is ``OEAroModel_MDL`` """ oemol = self.to_openeye(molecule) return self._find_smarts_matches( oemol, smarts, aromaticity_model=aromaticity_model ) def requires_openeye_module(module_name): def inner_decorator(function): @wraps(function) def wrapper(*args, **kwargs): try: module = importlib.import_module("openeye." + module_name) except (ImportError, ModuleNotFoundError): # TODO: Custom exception raise Exception("openeye." 
+ module_name) try: license_func = OpenEyeToolkitWrapper._license_functions[module_name] except KeyError: # TODO: Custom exception raise Exception(f"we do not currently use {module_name}") # TODO: Custom exception assert getattr(module, license_func)() return function(*args, **kwargs) return wrapper return inner_decorator class RDKitToolkitWrapper(ToolkitWrapper): """ RDKit toolkit wrapper .. warning :: This API is experimental and subject to change. """ _toolkit_name = "The RDKit" _toolkit_installation_instructions = ( "A conda-installable version of the free and open source RDKit cheminformatics " "toolkit can be found at: https://anaconda.org/rdkit/rdkit" ) def __init__(self): super().__init__() self._toolkit_file_read_formats = ["SDF", "MOL", "SMI"] # TODO: Add TDT support if not self.is_available(): raise ToolkitUnavailableException( f"The required toolkit {self._toolkit_name} is not " f"available. {self._toolkit_installation_instructions}" ) else: from rdkit import __version__ as rdkit_version self._toolkit_version = rdkit_version from rdkit import Chem # we have to make sure the toolkit can be loaded before formatting this dict # Note any new file write formats should be added here only self._toolkit_file_write_formats = { "SDF": Chem.SDWriter, "MOL": Chem.SDWriter, "SMI": Chem.SmilesWriter, "PDB": Chem.PDBWriter, "TDT": Chem.TDTWriter, } @property def toolkit_file_write_formats(self): """ List of file formats that this toolkit can write. """ return list(self._toolkit_file_write_formats.keys()) @classmethod def is_available(cls): """ Check whether the RDKit toolkit can be imported Returns ------- is_installed : bool True if RDKit is installed, False otherwise. 
""" if cls._is_available is None: try: importlib.import_module("rdkit", "Chem") except ImportError: cls._is_available = False else: cls._is_available = True return cls._is_available def from_object(self, obj, allow_undefined_stereo=False, _cls=None): """ If given an rdchem.Mol (or rdchem.Mol-derived object), this function will load it into an openff.toolkit.topology.molecule. Otherwise, it will return False. Parameters ---------- obj : A rdchem.Mol-derived object An object to be type-checked and converted into a Molecule, if possible. allow_undefined_stereo : bool, default=False Whether to accept molecules with undefined stereocenters. If False, an exception will be raised if a molecule with undefined stereochemistry is passed into this function. _cls : class Molecule constructor Returns ------- Molecule or False An openff.toolkit.topology.molecule Molecule. Raises ------ NotImplementedError If the object could not be converted into a Molecule. """ # TODO: Add tests for the from_object functions from rdkit import Chem if _cls is None: from openff.toolkit.topology.molecule import Molecule _cls = Molecule if isinstance(obj, Chem.rdchem.Mol): return _cls.from_rdkit(obj, allow_undefined_stereo=allow_undefined_stereo) raise NotImplementedError( "Cannot create Molecule from {} object".format(type(obj)) ) def from_pdb_and_smiles( self, file_path, smiles, allow_undefined_stereo=False, _cls=None ): """ Create a Molecule from a pdb file and a SMILES string using RDKit. Requires RDKit to be installed. The molecule is created and sanitised based on the SMILES string, we then find a mapping between this molecule and one from the PDB based only on atomic number and connections. The SMILES molecule is then reindex to match the PDB, the conformer is attached and the molecule returned. 
Parameters ---------- file_path: str PDB file path smiles : str a valid smiles string for the pdb, used for seterochemistry and bond order allow_undefined_stereo : bool, default=False If false, raises an exception if oemol contains undefined stereochemistry. _cls : class Molecule constructor Returns -------- molecule : openff.toolkit.Molecule (or _cls() type) An OFFMol instance with ordering the same as used in the PDB file. Raises ------ InvalidConformerError : if the SMILES and PDB molecules are not isomorphic. """ from rdkit import Chem from openff.toolkit.topology.molecule import InvalidConformerError, Molecule # Make the molecule from smiles offmol = self.from_smiles( smiles, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls ) # Make another molecule from the PDB, allow stero errors here they are expected pdbmol = self.from_rdkit( Chem.MolFromPDBFile(file_path, removeHs=False), allow_undefined_stereo=True, hydrogens_are_explicit=True, _cls=_cls, ) # check isomorphic and get the mapping if true the mapping will be # Dict[pdb_index: offmol_index] sorted by pdb_index isomorphic, mapping = _cls.are_isomorphic( pdbmol, offmol, return_atom_map=True, aromatic_matching=False, formal_charge_matching=False, bond_order_matching=False, atom_stereochemistry_matching=False, bond_stereochemistry_matching=False, ) if mapping is not None: new_mol = offmol.remap(mapping) # the pdb conformer is in the correct order so just attach it here new_mol._add_conformer(pdbmol.conformers[0]) return new_mol else: raise InvalidConformerError("The PDB and SMILES structures do not match.") def from_file( self, file_path, file_format, allow_undefined_stereo=False, _cls=None ): """ Create an openff.toolkit.topology.Molecule from a file using this toolkit. Parameters ---------- file_path : str The file to read the molecule from file_format : str Format specifier, usually file suffix (eg. 'MOL2', 'SMI') Note that not all toolkits support all formats. 
Check ToolkitWrapper.toolkit_file_read_formats for details. allow_undefined_stereo : bool, default=False If false, raises an exception if oemol contains undefined stereochemistry. _cls : class Molecule constructor Returns ------- molecules : iterable of Molecules a list of Molecule objects is returned. """ from rdkit import Chem file_format = file_format.upper() mols = list() if (file_format == "MOL") or (file_format == "SDF"): for rdmol in Chem.SupplierFromFilename( file_path, removeHs=False, sanitize=False, strictParsing=True ): if rdmol is None: continue # Sanitize the molecules (fails on nitro groups) try: Chem.SanitizeMol( rdmol, Chem.SANITIZE_ALL ^ Chem.SANITIZE_SETAROMATICITY ^ Chem.SANITIZE_ADJUSTHS, ) Chem.AssignStereochemistryFrom3D(rdmol) except ValueError as e: logger.warning(rdmol.GetProp("_Name") + " " + str(e)) continue Chem.SetAromaticity(rdmol, Chem.AromaticityModel.AROMATICITY_MDL) mol = self.from_rdkit( rdmol, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls ) mols.append(mol) elif file_format == "SMI": # TODO: We have to do some special stuff when we import SMILES (currently # just adding H's, but could get fancier in the future). It might be # worthwhile to parse the SMILES file ourselves and pass each SMILES # through the from_smiles function instead for rdmol in Chem.SmilesMolSupplier(file_path, titleLine=False): rdmol = Chem.AddHs(rdmol) mol = self.from_rdkit( rdmol, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls ) mols.append(mol) elif file_format == "PDB": raise Exception( "RDKit can not safely read PDBs on their own. Information about bond order and aromaticity " "is likely to be lost. To read a PDB using RDKit use Molecule.from_pdb_and_smiles()" ) # TODO: See if we can implement PDB+mol/smi combinations to get complete bond information. 
# testing to see if we can make a molecule from smiles and then use the PDB conformer as the geometry # and just reorder the molecule # https://github.com/openforcefield/openff-toolkit/issues/121 # rdmol = Chem.MolFromPDBFile(file_path, removeHs=False) # mol = Molecule.from_rdkit(rdmol, _cls=_cls) # mols.append(mol) # TODO: Add SMI, TDT(?) support return mols def from_file_obj( self, file_obj, file_format, allow_undefined_stereo=False, _cls=None ): """ Return an openff.toolkit.topology.Molecule from a file-like object (an object with a ".read()" method using this toolkit. .. warning :: This API is experimental and subject to change. Parameters ---------- file_obj : file-like object The file-like object to read the molecule from file_format : str Format specifier, usually file suffix (eg. 'MOL2', 'SMI') Note that not all toolkits support all formats. Check ToolkitWrapper.toolkit_file_read_formats for details. allow_undefined_stereo : bool, default=False If false, raises an exception if oemol contains undefined stereochemistry. _cls : class Molecule constructor Returns ------- molecules : Molecule or list of Molecules a list of Molecule objects is returned. """ from rdkit import Chem mols = [] if (file_format == "MOL") or (file_format == "SDF"): # TODO: Iterate over all mols in file_data for rdmol in Chem.ForwardSDMolSupplier(file_obj): mol = self.from_rdkit(rdmol, _cls=_cls) mols.append(mol) if file_format == "SMI": # TODO: Find a cleaner way to parse SMILES lines file_data = file_obj.read() lines = [line.strip() for line in file_data.split("\n")] # remove blank lines lines.remove("") for line in lines: mol = self.from_smiles(line, _cls=_cls) mols.append(mol) elif file_format == "PDB": raise Exception( "RDKit can not safely read PDBs on their own. Information about bond order and aromaticity " "is likely to be lost. 
To read a PDB using RDKit use Molecule.from_pdb_and_smiles()" ) # TODO: See if we can implement PDB+mol/smi combinations to get complete bond information. # https://github.com/openforcefield/openff-toolkit/issues/121 # file_data = file_obj.read() # rdmol = Chem.MolFromPDBBlock(file_data) # mol = Molecule.from_rdkit(rdmol, _cls=_cls) # mols.append(mol) # TODO: TDT file support return mols def to_file_obj(self, molecule, file_obj, file_format): """ Writes an OpenFF Molecule to a file-like object Parameters ---------- molecule : an OpenFF Molecule The molecule to write file_obj The file-like object to write to file_format The format for writing the molecule data Returns ------- """ file_format = file_format.upper() rdmol = self.to_rdkit(molecule) try: writer = self._toolkit_file_write_formats[file_format](file_obj) writer.write(rdmol) writer.close() # if we can not write to that file type catch the error here except KeyError: raise ValueError( f"The requested file type ({file_format}) is not supported to be written using " f"RDKitToolkitWrapper." ) def to_file(self, molecule, file_path, file_format): """ Writes an OpenFF Molecule to a file-like object Parameters ---------- molecule : an OpenFF Molecule The molecule to write file_path The file path to write to file_format The format for writing the molecule data Returns ------ """ # open a file object and pass to the object writer with open(file_path, "w") as file_obj: self.to_file_obj( molecule=molecule, file_obj=file_obj, file_format=file_format ) def enumerate_stereoisomers( self, molecule, undefined_only=False, max_isomers=20, rationalise=True ): """ Enumerate the stereocenters and bonds of the current molecule. 
Parameters ---------- molecule: openff.toolkit.topology.Molecule The molecule whose state we should enumerate undefined_only: bool optional, default=False If we should enumerate all stereocenters and bonds or only those with undefined stereochemistry max_isomers: int optional, default=20 The maximum amount of molecules that should be returned rationalise: bool optional, default=True If we should try to build and rationalise the molecule to ensure it can exist Returns -------- molecules: List[openff.toolkit.topology.Molecule] A list of openff.toolkit.topology.Molecule instances """ from rdkit import Chem from rdkit.Chem.EnumerateStereoisomers import ( EnumerateStereoisomers, StereoEnumerationOptions, ) # create the molecule rdmol = self.to_rdkit(molecule=molecule) # in case any bonds/centers are missing stereo chem flag it here Chem.AssignStereochemistry( rdmol, cleanIt=True, force=True, flagPossibleStereoCenters=True ) Chem.FindPotentialStereoBonds(rdmol) # set up the options stereo_opts = StereoEnumerationOptions( tryEmbedding=rationalise, onlyUnassigned=undefined_only, maxIsomers=max_isomers, ) isomers = tuple(EnumerateStereoisomers(rdmol, options=stereo_opts)) molecules = [] for isomer in isomers: # isomer has CIS/TRANS tags so convert back to E/Z Chem.SetDoubleBondNeighborDirections(isomer) Chem.AssignStereochemistry(isomer, force=True, cleanIt=True) mol = self.from_rdkit(isomer, _cls=molecule.__class__) if mol != molecule: molecules.append(mol) return molecules def enumerate_tautomers(self, molecule, max_states=20): """ Enumerate the possible tautomers of the current molecule. Parameters ---------- molecule: openff.toolkit.topology.Molecule The molecule whose state we should enumerate max_states: int optional, default=20 The maximum amount of molecules that should be returned Returns ------- molecules: List[openff.toolkit.topology.Molecule] A list of openff.toolkit.topology.Molecule instances not including the input molecule. 
""" from rdkit import Chem from rdkit.Chem.MolStandardize import rdMolStandardize enumerator = rdMolStandardize.TautomerEnumerator() enumerator.SetMaxTautomers(max_states) rdmol = Chem.RemoveHs(molecule.to_rdkit()) tautomers = enumerator.Enumerate(rdmol) # make a list of OpenFF molecules excluding the input molecule molecules = [] for taut in tautomers: taut_hs = Chem.AddHs(taut) mol = self.from_smiles( Chem.MolToSmiles(taut_hs), allow_undefined_stereo=True ) if mol != molecule: molecules.append(mol) return molecules[:max_states] def canonical_order_atoms(self, molecule): """ Canonical order the atoms in the molecule using the RDKit. Parameters ---------- molecule: openff.toolkit.topology.Molecule The input molecule Returns ------- molecule : openff.toolkit.topology.Molecule The input molecule, with canonically-indexed atoms and bonds. """ from rdkit import Chem rdmol = self.to_rdkit(molecule) # get the canonical ordering with hydrogens first # this is the default behaviour of RDKit atom_order = list(Chem.CanonicalRankAtoms(rdmol, breakTies=True)) heavy_atoms = rdmol.GetNumHeavyAtoms() hydrogens = rdmol.GetNumAtoms() - heavy_atoms # now go through and change the rankings to get the heavy atoms first if hydrogens are present if hydrogens != 0: for i in range(len(atom_order)): if rdmol.GetAtomWithIdx(i).GetAtomicNum() != 1: atom_order[i] -= hydrogens else: atom_order[i] += heavy_atoms # make an atom mapping from the atom_order and remap the molecule atom_mapping = dict((i, rank) for i, rank in enumerate(atom_order)) return molecule.remap(atom_mapping, current_to_new=True) def to_smiles(self, molecule, isomeric=True, explicit_hydrogens=True, mapped=False): """ Uses the RDKit toolkit to convert a Molecule into a SMILES string. A partially mapped smiles can also be generated for atoms of interest by supplying an `atom_map` to the properties dictionary. Parameters ---------- molecule : An openff.toolkit.topology.Molecule The molecule to convert into a SMILES. 
isomeric: bool optional, default= True return an isomeric smiles explicit_hydrogens: bool optional, default=True return a smiles string containing all hydrogens explicitly mapped: bool optional, default=False return a explicit hydrogen mapped smiles, the atoms to be mapped can be controlled by supplying an atom map into the properties dictionary. If no mapping is passed all atoms will be mapped in order, else an atom map dictionary from the current atom index to the map id should be supplied with no duplicates. The map ids (values) should start from 0 or 1. Returns ------- smiles : str The SMILES of the input molecule. """ from rdkit import Chem rdmol = self.to_rdkit(molecule) if not explicit_hydrogens: # remove the hydrogens from the molecule rdmol = Chem.RemoveHs(rdmol) if mapped: assert explicit_hydrogens is True, ( "Mapped smiles require all hydrogens and " "stereochemistry to be defined to retain order" ) # if we only want to map specific atoms check for an atom map atom_map = molecule._properties.get("atom_map", None) if atom_map is not None: # make sure there are no repeated indices map_ids = set(atom_map.values()) if len(map_ids) < len(atom_map): atom_map = None elif 0 in atom_map.values(): # we need to increment the map index for atom, map in atom_map.items(): atom_map[atom] = map + 1 if atom_map is None: # now we need to add the indexing to the rdmol to get it in the smiles for atom in rdmol.GetAtoms(): # the mapping must start from 1, as RDKit uses 0 to represent no mapping. atom.SetAtomMapNum(atom.GetIdx() + 1) else: for atom in rdmol.GetAtoms(): try: # try to set the atom map map_idx = atom_map[atom.GetIdx()] atom.SetAtomMapNum(map_idx) except KeyError: continue return Chem.MolToSmiles( rdmol, isomericSmiles=isomeric, allHsExplicit=explicit_hydrogens ) def from_smiles( self, smiles, hydrogens_are_explicit=False, allow_undefined_stereo=False, _cls=None, ): """ Create a Molecule from a SMILES string using the RDKit toolkit. .. 
warning :: This API is experimental and subject to change. Parameters ---------- smiles : str The SMILES string to turn into a molecule hydrogens_are_explicit : bool, default=False If False, RDKit will perform hydrogen addition using Chem.AddHs allow_undefined_stereo : bool, default=False Whether to accept SMILES with undefined stereochemistry. If False, an exception will be raised if a SMILES with undefined stereochemistry is passed into this function. _cls : class Molecule constructor Returns ------- molecule : openff.toolkit.topology.Molecule An OpenFF style molecule. """ from rdkit import Chem rdmol = Chem.MolFromSmiles(smiles, sanitize=False) # strip the atom map from the molecule if it has one # so we don't affect the sterochemistry tags for atom in rdmol.GetAtoms(): if atom.GetAtomMapNum() != 0: # set the map back to zero but hide the index in the atom prop data atom.SetProp("_map_idx", str(atom.GetAtomMapNum())) # set it back to zero atom.SetAtomMapNum(0) # Chem.SanitizeMol calls updatePropertyCache so we don't need to call it ourselves # https://www.rdkit.org/docs/cppapi/namespaceRDKit_1_1MolOps.html#a8d831787aaf2d65d9920c37b25b476f5 Chem.SanitizeMol( rdmol, Chem.SANITIZE_ALL ^ Chem.SANITIZE_ADJUSTHS ^ Chem.SANITIZE_SETAROMATICITY, ) Chem.SetAromaticity(rdmol, Chem.AromaticityModel.AROMATICITY_MDL) # Chem.MolFromSmiles adds bond directions (i.e. ENDDOWNRIGHT/ENDUPRIGHT), but # doesn't set bond.GetStereo(). We need to call AssignStereochemistry for that. Chem.AssignStereochemistry(rdmol) # Throw an exception/warning if there is unspecified stereochemistry. 
        if not allow_undefined_stereo:
            self._detect_undefined_stereo(
                rdmol, err_msg_prefix="Unable to make OFFMol from SMILES: "
            )

        # Add explicit hydrogens if they aren't there already
        if not hydrogens_are_explicit:
            rdmol = Chem.AddHs(rdmol)
        elif hydrogens_are_explicit:
            # Caller promised every hydrogen is explicit; any implicit H on any
            # atom means the SMILES did not actually spell them all out.
            for atom_idx in range(rdmol.GetNumAtoms()):
                atom = rdmol.GetAtomWithIdx(atom_idx)
                if atom.GetNumImplicitHs() != 0:
                    raise ValueError(
                        f"'hydrogens_are_explicit' was specified as True, but RDKit toolkit interpreted "
                        f"SMILES '{smiles}' as having implicit hydrogen. If this SMILES is intended to "
                        f"express all explicit hydrogens in the molecule, then you should construct the "
                        f"desired molecule as an RDMol with no implicit hydrogens, and then use "
                        f"Molecule.from_rdkit() to create the desired OFFMol."
                    )

        # Delegate the actual conversion to from_rdkit.
        molecule = self.from_rdkit(
            rdmol,
            _cls=_cls,
            allow_undefined_stereo=allow_undefined_stereo,
            hydrogens_are_explicit=hydrogens_are_explicit,
        )

        return molecule

    def from_inchi(self, inchi, allow_undefined_stereo=False, _cls=None):
        """
        Construct a Molecule from a InChI representation

        Parameters
        ----------
        inchi : str
            The InChI representation of the molecule.
        allow_undefined_stereo : bool, default=False
            Whether to accept InChI with undefined stereochemistry. If False,
            an exception will be raised if a InChI with undefined stereochemistry
            is passed into this function.
        _cls : class
            Molecule constructor

        Returns
        -------
        molecule : openff.toolkit.topology.Molecule
        """
        from rdkit import Chem

        # this seems to always remove the hydrogens
        rdmol = Chem.MolFromInchi(inchi, sanitize=False, removeHs=False)

        # try and catch an InChI parsing error
        if rdmol is None:
            raise RuntimeError(
                "There was an issue parsing the InChI string, please check and try again."
            )

        # process the molecule
        # TODO do we need this with inchi?
        rdmol.UpdatePropertyCache(strict=False)
        # Sanitize everything except hydrogen adjustment and aromaticity;
        # aromaticity is applied explicitly below with the MDL model.
        Chem.SanitizeMol(
            rdmol,
            Chem.SANITIZE_ALL ^ Chem.SANITIZE_ADJUSTHS ^ Chem.SANITIZE_SETAROMATICITY,
        )
        Chem.SetAromaticity(rdmol, Chem.AromaticityModel.AROMATICITY_MDL)

        # add hydrogens back here
        rdmol = Chem.AddHs(rdmol)

        molecule = self.from_rdkit(
            rdmol, allow_undefined_stereo=allow_undefined_stereo, _cls=_cls
        )

        return molecule

    def generate_conformers(
        self, molecule, n_conformers=1, rms_cutoff=None, clear_existing=True, _cls=None
    ):
        """
        Generate molecule conformers using RDKit.

        .. warning :: This API is experimental and subject to change.

        .. todo ::

           * which parameters should we expose? (or can we implement a general system with \*\*kwargs?)
           * will the coordinates be returned in the OpenFF Molecule's own indexing system?
             Or is there a chance that they'll get reindexed when we convert the input into an RDMol?

        Parameters
        ----------
        molecule : a :class:`Molecule`
            The molecule to generate conformers for.
        n_conformers : int, default=1
            Maximum number of conformers to generate.
        rms_cutoff : simtk.Quantity-wrapped float, in units of distance, optional, default=None
            The minimum RMS value at which two conformers are considered redundant and one is deleted.
            If None, the cutoff is set to 1 Angstrom
        clear_existing : bool, default=True
            Whether to overwrite existing conformers for the molecule.
        _cls : class
            Molecule constructor
        """
        from rdkit.Chem import AllChem

        if rms_cutoff is None:
            rms_cutoff = 1.0 * unit.angstrom
        rdmol = self.to_rdkit(molecule)
        # TODO: This generates way more conformations than omega, given the same
        # nConfs and RMS threshold. Is there some way to set an energy cutoff as well?
        # Fixed random seed keeps conformer generation deterministic.
        AllChem.EmbedMultipleConfs(
            rdmol,
            numConfs=n_conformers,
            pruneRmsThresh=rms_cutoff / unit.angstrom,
            randomSeed=1,
            # params=AllChem.ETKDG()
        )
        # Round-trip through from_rdkit so conformers land in OFF ordering.
        molecule2 = self.from_rdkit(
            rdmol, allow_undefined_stereo=True, _cls=molecule.__class__
        )
        if clear_existing:
            molecule._conformers = list()

        for conformer in molecule2._conformers:
            molecule._add_conformer(conformer)

    def assign_partial_charges(
        self,
        molecule,
        partial_charge_method=None,
        use_conformers=None,
        strict_n_conformers=False,
        _cls=None,
    ):
        """
        Compute partial charges with RDKit, and assign
        the new values to the partial_charges attribute.

        .. warning :: This API is experimental and subject to change.

        Parameters
        ----------
        molecule : openff.toolkit.topology.Molecule
            Molecule for which partial charges are to be computed
        partial_charge_method : str, optional, default=None
            The charge model to use. One of ['mmff94']. If None, 'mmff94' will be used.

            * 'mmff94': Applies partial charges using the Merck Molecular Force Field
              (MMFF). This method does not make use of conformers, and hence
              ``use_conformers`` and ``strict_n_conformers`` will not impact the partial
              charges produced.
        use_conformers : iterable of simtk.unit.Quantity-wrapped numpy arrays, each with
            shape (n_atoms, 3) and dimension of distance. Optional, default = None
            Coordinates to use for partial charge calculation. If None, an appropriate number of
            conformers will be generated.
        strict_n_conformers : bool, default=False
            Whether to raise an exception if an invalid number of conformers is provided for
            the given charge method.
            If this is False and an invalid number of conformers is found, a warning will be raised.
        _cls : class
            Molecule constructor

        Raises
        ------
        ChargeMethodUnavailableError
            if the requested charge method can not be handled by this toolkit

        ChargeCalculationError
            if the charge method is supported by this toolkit, but fails
        """
        import numpy as np
        from rdkit.Chem import AllChem

        SUPPORTED_CHARGE_METHODS = {"mmff94"}

        if partial_charge_method is None:
            partial_charge_method = "mmff94"

        # Accept any capitalization of the method name.
        partial_charge_method = partial_charge_method.lower()

        if partial_charge_method not in SUPPORTED_CHARGE_METHODS:
            raise ChargeMethodUnavailableError(
                f"partial_charge_method '{partial_charge_method}' is not available from RDKitToolkitWrapper. "
                f"Available charge methods are {list(SUPPORTED_CHARGE_METHODS)} "
            )

        rdkit_molecule = molecule.to_rdkit()
        charges = None

        if partial_charge_method == "mmff94":

            mmff_properties = AllChem.MMFFGetMoleculeProperties(
                rdkit_molecule, "MMFF94"
            )
            charges = np.array(
                [
                    mmff_properties.GetMMFFPartialCharge(i)
                    for i in range(molecule.n_atoms)
                ]
            )

        molecule.partial_charges = charges * unit.elementary_charge

    @classmethod
    def _elf_is_problematic_conformer(
        cls, molecule: "Molecule", conformer: unit.Quantity
    ) -> Tuple[bool, Optional[str]]:
        """A function which checks if a particular conformer is known to be problematic
        when computing ELF partial charges.

        Currently this includes conformers which:

        * contain a trans-COOH configuration. The trans conformer is discarded because
          it leads to strong electrostatic interactions when assigning charges, and these
          result in unreasonable charges. Downstream calculations have observed up to a
          4 log unit error in water-octanol logP calculations when using charges assigned
          from trans conformers.

        Returns
        -------
            A tuple of a bool stating whether the conformer is problematic and, if it
            is, a string message explaining why. If the conformer is not problematic,
            the second return value will be none.
        """
        from rdkit.Chem.rdMolTransforms import GetDihedralRad

        # Create a copy of the molecule which contains only this conformer.
        molecule_copy = copy.deepcopy(molecule)
        molecule_copy._conformers = [conformer]

        rdkit_molecule = molecule_copy.to_rdkit()

        # Check for trans-COOH configurations
        carboxylic_acid_matches = cls._find_smarts_matches(
            rdkit_molecule, "[#6X3:2](=[#8:1])(-[#8X2H1:3]-[#1:4])"
        )

        for match in carboxylic_acid_matches:

            # Dihedral over the O=C-O-H atoms of the matched COOH group.
            dihedral_angle = GetDihedralRad(rdkit_molecule.GetConformer(0), *match)

            if dihedral_angle > np.pi / 2.0:
                # Discard the 'trans' conformer.
                return (
                    True,
                    "Molecules which contain COOH functional groups in a trans "
                    "configuration are discarded by the ELF method.",
                )

        return False, None

    @classmethod
    def _elf_prune_problematic_conformers(
        cls, molecule: "Molecule"
    ) -> List[unit.Quantity]:
        """A function which attempts to remove conformers which are known to be
        problematic when computing ELF partial charges.

        Currently this includes conformers which:

        * contain a trans-COOH configuration. These conformers ... TODO add reason.

        Notes
        -----
        * Problematic conformers are flagged by the
          ``RDKitToolkitWrapper._elf_is_problematic_conformer`` function.

        Returns
        -------
            The conformers to retain.
        """

        valid_conformers = []

        for i, conformer in enumerate(molecule.conformers):

            is_problematic, reason = cls._elf_is_problematic_conformer(
                molecule, conformer
            )

            if is_problematic:
                logger.warning(f"Discarding conformer {i}: {reason}")
            else:
                valid_conformers.append(conformer)

        return valid_conformers

    @classmethod
    def _elf_compute_electrostatic_energy(
        cls, molecule: "Molecule", conformer: unit.Quantity
    ) -> float:
        """Computes the 'electrostatic interaction energy' of a particular conformer
        of a molecule.

        The energy is computed as the sum of ``|q_i * q_j| * r_ij^-1`` over all pairs
        of atoms (i, j) excluding 1-2 and 1-3 terms, where q_i is the partial charge of
        atom i and r_ij the Euclidean distance between atoms i and j.

        Notes
        -----
        * The partial charges will be taken from the molecule directly.

        Parameters
        ----------
        molecule
            The molecule containing the partial charges.
        conformer
            The conformer to compute the energy of. This should be a unit wrapped
            numpy array with shape=(n_atoms, 3) with units compatible with angstroms.

        Returns
        -------
            The electrostatic interaction energy in units of [e^2 / Angstrom].
        """
        if molecule.partial_charges is None:
            raise ValueError("The molecule has no partial charges assigned.")

        # ELF uses the absolute magnitude of each charge.
        partial_charges = np.abs(
            molecule.partial_charges.value_in_unit(unit.elementary_charge)
        ).reshape(-1, 1)

        # Build an exclusion list for 1-2 and 1-3 interactions.
        excluded_pairs = {
            *[(bond.atom1_index, bond.atom2_index) for bond in molecule.bonds],
            *[
                (angle[0].molecule_atom_index, angle[-1].molecule_atom_index)
                for angle in molecule.angles
            ],
        }

        # Build the distance matrix between all pairs of atoms.
        coordinates = conformer.value_in_unit(unit.angstrom)

        # ||a - b||^2 expanded as |a|^2 - 2 a.b + |b|^2, vectorized over all pairs.
        distances = np.sqrt(
            np.sum(np.square(coordinates)[:, np.newaxis, :], axis=2)
            - 2 * coordinates.dot(coordinates.T)
            + np.sum(np.square(coordinates), axis=1)
        )

        # Handle edge cases where the squared distance is slightly negative due to
        # precision issues
        np.fill_diagonal(distances, 0.0)

        # Invert distances, leaving (near-)zero entries as zero to avoid divide-by-zero.
        inverse_distances = np.reciprocal(
            distances, out=np.zeros_like(distances), where=~np.isclose(distances, 0.0)
        )

        # Multiply by the charge products.
        charge_products = partial_charges @ partial_charges.T

        for x, y in excluded_pairs:
            charge_products[x, y] = 0.0
            charge_products[y, x] = 0.0

        interaction_energies = inverse_distances * charge_products

        # 0.5 corrects for double counting each (i, j) pair in the full matrix.
        return 0.5 * interaction_energies.sum()

    @classmethod
    def _elf_compute_rms_matrix(cls, molecule: "Molecule") -> np.ndarray:
        """Computes the symmetric RMS matrix of all conformers in a molecule taking
        only heavy atoms into account.

        Parameters
        ----------
        molecule
            The molecule containing the conformers.

        Returns
        -------
            The RMS matrix with shape=(n_conformers, n_conformers).
""" from rdkit import Chem from rdkit.Chem import AllChem rdkit_molecule: Chem.RWMol = Chem.RemoveHs(molecule.to_rdkit()) n_conformers = len(molecule.conformers) conformer_ids = [conf.GetId() for conf in rdkit_molecule.GetConformers()] # Compute the RMS matrix making sure to take into account any automorhism (e.g # a phenyl or nitro substituent flipped 180 degrees. rms_matrix = np.zeros((n_conformers, n_conformers)) for i, j in itertools.combinations(conformer_ids, 2): rms_matrix[i, j] = AllChem.GetBestRMS( rdkit_molecule, rdkit_molecule, conformer_ids[i], conformer_ids[j], ) rms_matrix += rms_matrix.T return rms_matrix @classmethod def _elf_select_diverse_conformers( cls, molecule: "Molecule", ranked_conformers: List[unit.Quantity], limit: int, rms_tolerance: unit.Quantity, ) -> List[unit.Quantity]: """Attempt to greedily select a specified number conformers which are maximally diverse. The conformer with the lowest electrostatic energy (the first conformer in the ``ranked_conformers`` list) is always chosen. After that selection proceeds by: a) selecting an un-selected conformer which is the most different from those already selected, and whose RMS compared to each selected conformer is greater than ``rms_tolerance``. Here most different means the conformer which has the largest sum of RMS with the selected conformers. b) repeating a) until either ``limit`` number of conformers have been selected, or there are no more distinct conformers to select from. Notes ----- * As the selection is greedy there is no guarantee that the selected conformers will be the optimal distinct i.e. there may be other selections of conformers which are more distinct. Parameters ---------- molecule The molecule object which matches the conformers to select from. ranked_conformers A list of conformers to select from, ranked by their electrostatic interaction energy (see ``_compute_electrostatic_energy``). limit The maximum number of conformers to select. 
rms_tolerance Conformers whose RMS is within this amount will be treated as identical and the duplicate discarded. Returns ------- The select list of conformers. """ # Compute the RMS between all pairs of conformers molecule = copy.deepcopy(molecule) molecule.conformers.clear() for conformer in ranked_conformers: molecule.add_conformer(conformer) rms_matrix = cls._elf_compute_rms_matrix(molecule) # Apply the greedy selection process. closed_list = np.zeros(limit).astype(int) closed_mask = np.zeros(rms_matrix.shape[0], dtype=bool) n_selected = 1 for i in range(min(molecule.n_conformers, limit - 1)): distances = rms_matrix[closed_list[: i + 1], :].sum(axis=0) # Exclude already selected conformers or conformers which are too similar # to those already selected. closed_mask[ np.any( rms_matrix[closed_list[: i + 1], :] < rms_tolerance.value_in_unit(unit.angstrom), axis=0, ) ] = True if np.all(closed_mask): # Stop of there are no more distinct conformers to select from. break distant_index = np.ma.array(distances, mask=closed_mask).argmax() closed_list[i + 1] = distant_index n_selected += 1 return [ranked_conformers[i.item()] for i in closed_list[:n_selected]] def apply_elf_conformer_selection( self, molecule: "Molecule", percentage: float = 2.0, limit: int = 10, rms_tolerance: unit.Quantity = 0.05 * unit.angstrom, ): """Applies the `ELF method <https://docs.eyesopen.com/toolkits/python/quacpactk/molchargetheory.html#elf-conformer-selection>`_ to select a set of diverse conformers which have minimal electrostatically strongly interacting functional groups from a molecules conformers. The diverse conformer selection is performed by the ``_elf_select_diverse_conformers`` function, which attempts to greedily select conformers which are most distinct according to their RMS. 
Warnings -------- * Although this function is inspired by the OpenEye ELF10 method, this implementation may yield slightly different conformers due to potential differences in this and the OE closed source implementation. Notes ----- * The input molecule should have a large set of conformers already generated to select the ELF10 conformers from. * The selected conformers will be retained in the `molecule.conformers` list while unselected conformers will be discarded. * Only heavy atoms are included when using the RMS to select diverse conformers. See Also -------- RDKitToolkitWrapper._elf_select_diverse_conformers Parameters ---------- molecule The molecule which contains the set of conformers to select from. percentage The percentage of conformers with the lowest electrostatic interaction energies to greedily select from. limit The maximum number of conformers to select. rms_tolerance Conformers whose RMS is within this amount will be treated as identical and the duplicate discarded. """ if molecule.n_conformers == 0: return # Copy the input molecule so we can directly perturb it within the method. molecule_copy = copy.deepcopy(molecule) # Prune any problematic conformers, such as trans-COOH configurations. conformers = self._elf_prune_problematic_conformers(molecule_copy) if len(conformers) == 0: raise ValueError( "There were no conformers to select from after discarding conformers " "which are known to be problematic when computing ELF partial charges. " "Make sure to generate a diverse array of conformers before calling the " "`RDKitToolkitWrapper.apply_elf_conformer_selection` method." ) # Generate a set of absolute MMFF94 partial charges for the molecule and use # these to compute the electrostatic interaction energy of each conformer. 
self.assign_partial_charges(molecule_copy, "mmff94") conformer_energies = [ ( self._elf_compute_electrostatic_energy(molecule_copy, conformer), conformer, ) for conformer in conformers ] # Rank the conformer energies and retain `percentage`% with the lowest energies. conformer_energies = sorted(conformer_energies, key=lambda x: x[0]) cutoff_index = max(1, int(len(conformer_energies) * percentage / 100.0)) low_energy_conformers = [ conformer for _, conformer in conformer_energies[:cutoff_index] ] # Attempt to greedily select `limit` conformers which are maximally diverse. diverse_conformers = self._elf_select_diverse_conformers( molecule_copy, low_energy_conformers, limit, rms_tolerance ) molecule._conformers = diverse_conformers def from_rdkit( self, rdmol, allow_undefined_stereo=False, hydrogens_are_explicit=False, _cls=None, ): """ Create a Molecule from an RDKit molecule. Requires the RDKit to be installed. .. warning :: This API is experimental and subject to change. Parameters ---------- rdmol : rkit.RDMol An RDKit molecule allow_undefined_stereo : bool, default=False If false, raises an exception if rdmol contains undefined stereochemistry. hydrogens_are_explicit : bool, default=False If False, RDKit will perform hydrogen addition using Chem.AddHs _cls : class Molecule constructor Returns ------- molecule : openff.toolkit.topology.Molecule An OpenFF molecule Examples -------- Create a molecule from an RDKit molecule >>> from rdkit import Chem >>> from openff.toolkit.tests.utils import get_data_file_path >>> rdmol = Chem.MolFromMolFile(get_data_file_path('systems/monomers/ethanol.sdf')) >>> toolkit_wrapper = RDKitToolkitWrapper() >>> molecule = toolkit_wrapper.from_rdkit(rdmol) """ from rdkit import Chem if _cls is None: from openff.toolkit.topology.molecule import Molecule _cls = Molecule # Make a copy of the RDKit Mol as we'll need to change it (e.g. assign stereo). 
        rdmol = Chem.Mol(rdmol)

        if not hydrogens_are_explicit:
            rdmol = Chem.AddHs(rdmol, addCoords=True)

        # Sanitizing the molecule. We handle aromaticity and chirality manually.
        # This SanitizeMol(...) calls cleanUp, updatePropertyCache, symmetrizeSSSR,
        # assignRadicals, setConjugation, and setHybridization.
        Chem.SanitizeMol(
            rdmol,
            (
                Chem.SANITIZE_ALL
                ^ Chem.SANITIZE_SETAROMATICITY
                ^ Chem.SANITIZE_ADJUSTHS
                ^ Chem.SANITIZE_CLEANUPCHIRALITY
                ^ Chem.SANITIZE_KEKULIZE
            ),
        )
        Chem.SetAromaticity(rdmol, Chem.AromaticityModel.AROMATICITY_MDL)
        # SetAromaticity set aromatic bonds to 1.5, but Molecule.bond_order is an
        # integer (contrarily to fractional_bond_order) so we need the Kekule order.
        Chem.Kekulize(rdmol)

        # Make sure the bond stereo tags are set before checking for
        # undefined stereo. RDKit can figure out bond stereo from other
        # information in the Mol object like bond direction properties.
        # Do not overwrite eventual chiral tags provided by the user.
        Chem.AssignStereochemistry(rdmol, cleanIt=False)

        # Check for undefined stereochemistry.
        self._detect_undefined_stereo(
            rdmol,
            raise_warning=allow_undefined_stereo,
            err_msg_prefix="Unable to make OFFMol from RDMol: ",
        )

        # Create a new OpenFF Molecule
        offmol = _cls()

        # If RDMol has a title save it
        if rdmol.HasProp("_Name"):
            # raise Exception('{}'.format(rdmol.GetProp('name')))
            offmol.name = rdmol.GetProp("_Name")
        else:
            offmol.name = ""

        # Store all properties
        # TODO: Should there be an API point for storing properties?
        properties = rdmol.GetPropsAsDict()
        offmol._properties = properties

        # setting chirality in openeye requires using neighbor atoms
        # therefore we can't do it until after the atoms and bonds are all added
        map_atoms = {}
        map_bonds = {}
        # if we are loading from a mapped smiles extract the mapping
        atom_mapping = {}
        for rda in rdmol.GetAtoms():
            rd_idx = rda.GetIdx()
            # if the molecule was made from a mapped smiles this has been hidden
            # so that it does not affect the stereochemistry tags
            try:
                map_id = int(rda.GetProp("_map_idx"))
            except KeyError:
                map_id = rda.GetAtomMapNum()

            # create a new atom
            # atomic_number = oemol.NewAtom(rda.GetAtomicNum())
            atomic_number = rda.GetAtomicNum()
            formal_charge = rda.GetFormalCharge() * unit.elementary_charge
            is_aromatic = rda.GetIsAromatic()
            if rda.HasProp("_Name"):
                name = rda.GetProp("_Name")
            else:
                # check for PDB names
                try:
                    name = rda.GetMonomerInfo().GetName().strip()
                except AttributeError:
                    name = ""

            # If chiral, store the chirality to be set later
            stereochemistry = None
            # tag = rda.GetChiralTag()
            if rda.HasProp("_CIPCode"):
                stereo_code = rda.GetProp("_CIPCode")
                # if tag == Chem.CHI_TETRAHEDRAL_CCW:
                if stereo_code == "R":
                    stereochemistry = "R"
                # if tag == Chem.CHI_TETRAHEDRAL_CW:
                elif stereo_code == "S":
                    stereochemistry = "S"
                else:
                    raise UndefinedStereochemistryError(
                        "In from_rdkit: Expected atom stereochemistry of R or S. "
                        "Got {} instead.".format(stereo_code)
                    )

            atom_index = offmol._add_atom(
                atomic_number,
                formal_charge,
                is_aromatic,
                name=name,
                stereochemistry=stereochemistry,
            )
            map_atoms[rd_idx] = atom_index
            atom_mapping[atom_index] = map_id

        # If we have a full / partial atom map add it to the molecule.
        # A map value of zero indicates "no mapping" for that atom.
        if {*atom_mapping.values()} != {0}:

            offmol._properties["atom_map"] = {
                idx: map_idx for idx, map_idx in atom_mapping.items() if map_idx != 0
            }

        # Similar to chirality, stereochemistry of bonds in OE is set relative to
        # their neighbors
        for rdb in rdmol.GetBonds():
            rdb_idx = rdb.GetIdx()
            a1 = rdb.GetBeginAtomIdx()
            a2 = rdb.GetEndAtomIdx()

            # Determine bond aromaticity and Kekulized bond order
            is_aromatic = rdb.GetIsAromatic()
            order = rdb.GetBondTypeAsDouble()
            # Convert floating-point bond order to integral bond order
            order = int(order)

            # create a new bond
            bond_index = offmol._add_bond(
                map_atoms[a1], map_atoms[a2], order, is_aromatic
            )
            map_bonds[rdb_idx] = bond_index

        # Now fill in the cached (structure-dependent) properties. We have to have
        # the 2D structure of the molecule in place first, because each call to
        # add_atom and add_bond invalidates all cached properties
        for rdb in rdmol.GetBonds():
            rdb_idx = rdb.GetIdx()
            offb_idx = map_bonds[rdb_idx]
            offb = offmol.bonds[offb_idx]
            # determine if stereochemistry is needed
            # Note that RDKit has 6 possible values of bond stereo: CIS, TRANS, E, Z,
            # ANY, or NONE. The logic below assumes that "ANY" and "NONE" mean the
            # same thing.
            stereochemistry = None
            tag = rdb.GetStereo()
            if tag == Chem.BondStereo.STEREOZ:
                stereochemistry = "Z"
            elif tag == Chem.BondStereo.STEREOE:
                stereochemistry = "E"
            elif tag == Chem.BondStereo.STEREOTRANS or tag == Chem.BondStereo.STEREOCIS:
                raise ValueError(
                    "Expected RDKit bond stereochemistry of E or Z, got {} instead".format(
                        tag
                    )
                )
            offb._stereochemistry = stereochemistry
            fractional_bond_order = None
            if rdb.HasProp("fractional_bond_order"):
                fractional_bond_order = rdb.GetDoubleProp("fractional_bond_order")
            offb.fractional_bond_order = fractional_bond_order

        # TODO: Save conformer(s), if present
        # If the rdmol has a conformer, store its coordinates
        if len(rdmol.GetConformers()) != 0:
            for conf in rdmol.GetConformers():
                n_atoms = offmol.n_atoms
                # TODO: Will this always be angstrom when loading from RDKit?
                positions = unit.Quantity(np.zeros((n_atoms, 3)), unit.angstrom)
                for rd_idx, off_idx in map_atoms.items():
                    atom_coords = conf.GetPositions()[rd_idx, :] * unit.angstrom
                    positions[off_idx, :] = atom_coords
                offmol._add_conformer(positions)

        partial_charges = unit.Quantity(
            np.zeros(shape=offmol.n_atoms, dtype=np.float64),
            unit=unit.elementary_charge,
        )

        # Charges are all-or-nothing: either every atom carries a PartialCharge
        # property or none does.
        any_atom_has_partial_charge = False
        for rd_idx, rd_atom in enumerate(rdmol.GetAtoms()):
            off_idx = map_atoms[rd_idx]
            if rd_atom.HasProp("PartialCharge"):
                charge = rd_atom.GetDoubleProp("PartialCharge") * unit.elementary_charge
                partial_charges[off_idx] = charge
                any_atom_has_partial_charge = True
            else:
                # If some other atoms had partial charges but this one doesn't, raise an Exception
                if any_atom_has_partial_charge:
                    raise ValueError(
                        "Some atoms in rdmol have partial charges, but others do not."
                    )
        if any_atom_has_partial_charge:
            offmol.partial_charges = partial_charges
        else:
            offmol.partial_charges = None
        return offmol

    @classmethod
    def to_rdkit(cls, molecule, aromaticity_model=DEFAULT_AROMATICITY_MODEL):
        """
        Create an RDKit molecule

        Requires the RDKit to be installed.

        ..
        warning :: This API is experimental and subject to change.

        Parameters
        ----------
        aromaticity_model : str, optional, default=DEFAULT_AROMATICITY_MODEL
            The aromaticity model to use

        Returns
        -------
        rdmol : rkit.RDMol
            An RDKit molecule

        Examples
        --------

        Convert a molecule to RDKit

        >>> from openff.toolkit.topology import Molecule
        >>> ethanol = Molecule.from_smiles('CCO')
        >>> rdmol = ethanol.to_rdkit()
        """
        from rdkit import Chem, Geometry

        # Create an editable RDKit molecule
        rdmol = Chem.RWMol()

        # Set name
        # TODO: What is the best practice for how this should be named?
        if not (molecule.name is None):
            rdmol.SetProp("_Name", molecule.name)

        # TODO: Set other properties
        # RDKit property setters are typed, so dispatch on the Python type.
        for name, value in molecule.properties.items():
            if type(value) == str:
                rdmol.SetProp(name, value)
            elif type(value) == int:
                rdmol.SetIntProp(name, value)
            elif type(value) == float:
                rdmol.SetDoubleProp(name, value)
            elif type(value) == bool:
                rdmol.SetBoolProp(name, value)
            else:
                # Shove everything else into a string
                rdmol.SetProp(name, str(value))

        _bondtypes = {
            1: Chem.BondType.SINGLE,
            1.5: Chem.BondType.AROMATIC,
            2: Chem.BondType.DOUBLE,
            3: Chem.BondType.TRIPLE,
            4: Chem.BondType.QUADRUPLE,
            5: Chem.BondType.QUINTUPLE,
            6: Chem.BondType.HEXTUPLE,
            7: Chem.BondType.ONEANDAHALF,
        }

        for index, atom in enumerate(molecule.atoms):
            rdatom = Chem.Atom(atom.atomic_number)
            rdatom.SetFormalCharge(
                atom.formal_charge.value_in_unit(unit.elementary_charge)
            )
            rdatom.SetIsAromatic(atom.is_aromatic)
            rdatom.SetProp("_Name", atom.name)

            ## Stereo handling code moved to after bonds are added
            if atom.stereochemistry == "S":
                rdatom.SetChiralTag(Chem.CHI_TETRAHEDRAL_CW)
            elif atom.stereochemistry == "R":
                rdatom.SetChiralTag(Chem.CHI_TETRAHEDRAL_CCW)

            rd_index = rdmol.AddAtom(rdatom)

            # Let's make sure all the atom indices in the two molecules
            # are the same, otherwise we need to create an atom map.
            assert index == atom.molecule_atom_index
            assert index == rd_index

        for bond in molecule.bonds:
            atom_indices = (
                bond.atom1.molecule_atom_index,
                bond.atom2.molecule_atom_index,
            )
            rdmol.AddBond(*atom_indices)
            rdbond = rdmol.GetBondBetweenAtoms(*atom_indices)
            if not (bond.fractional_bond_order is None):
                rdbond.SetDoubleProp(
                    "fractional_bond_order", bond.fractional_bond_order
                )
            # Assign bond type, which is based on order unless it is aromatic
            if bond.is_aromatic:
                rdbond.SetBondType(_bondtypes[1.5])
                rdbond.SetIsAromatic(True)
            else:
                rdbond.SetBondType(_bondtypes[bond.bond_order])
                rdbond.SetIsAromatic(False)

        Chem.SanitizeMol(
            rdmol,
            Chem.SANITIZE_ALL ^ Chem.SANITIZE_ADJUSTHS ^ Chem.SANITIZE_SETAROMATICITY,
        )

        # Fix for aromaticity being lost
        if aromaticity_model == "OEAroModel_MDL":
            Chem.SetAromaticity(rdmol, Chem.AromaticityModel.AROMATICITY_MDL)
        else:
            raise ValueError(f"Aromaticity model {aromaticity_model} not recognized")

        # Assign atom stereochemistry and collect atoms for which RDKit
        # can't figure out chirality. The _CIPCode property of these atoms
        # will be forcefully set to the stereo we want (see #196).
        undefined_stereo_atoms = {}
        for index, atom in enumerate(molecule.atoms):
            rdatom = rdmol.GetAtomWithIdx(index)

            # Skip non-chiral atoms.
            if atom.stereochemistry is None:
                continue

            # Let's randomly assign this atom's (local) stereo to CW
            # and check if this causes the (global) stereo to be set
            # to the desired one (S or R).
            rdatom.SetChiralTag(Chem.CHI_TETRAHEDRAL_CW)

            # We need to do force and cleanIt to recalculate CIP stereo.
            Chem.AssignStereochemistry(rdmol, force=True, cleanIt=True)

            # If our random initial assignment worked, then we're set.
            if (
                rdatom.HasProp("_CIPCode")
                and rdatom.GetProp("_CIPCode") == atom.stereochemistry
            ):
                continue

            # Otherwise, set it to CCW.
            rdatom.SetChiralTag(Chem.CHI_TETRAHEDRAL_CCW)
            # We need to do force and cleanIt to recalculate CIP stereo.
            Chem.AssignStereochemistry(rdmol, force=True, cleanIt=True)
            # Hopefully this worked, otherwise something's wrong
            if (
                rdatom.HasProp("_CIPCode")
                and rdatom.GetProp("_CIPCode") == atom.stereochemistry
            ):
                continue

            # Keep track of undefined stereo atoms. We'll force stereochemistry
            # at the end to avoid the next AssignStereochemistry to overwrite.
            if not rdatom.HasProp("_CIPCode"):
                undefined_stereo_atoms[rdatom] = atom.stereochemistry
                continue

            # Something is wrong.
            err_msg = (
                "Unknown atom stereochemistry encountered in to_rdkit. "
                "Desired stereochemistry: {}. Set stereochemistry {}".format(
                    atom.stereochemistry, rdatom.GetProp("_CIPCode")
                )
            )
            raise RuntimeError(err_msg)

        # Copy bond stereo info from molecule to rdmol.
        cls._assign_rdmol_bonds_stereo(molecule, rdmol)

        # Set coordinates if we have them
        if molecule._conformers:
            for conformer in molecule._conformers:
                rdmol_conformer = Chem.Conformer()
                for atom_idx in range(molecule.n_atoms):
                    x, y, z = conformer[atom_idx, :].value_in_unit(unit.angstrom)
                    rdmol_conformer.SetAtomPosition(atom_idx, Geometry.Point3D(x, y, z))
                rdmol.AddConformer(rdmol_conformer, assignId=True)

        # Retain charges, if present
        if not (molecule._partial_charges is None):

            rdk_indexed_charges = np.zeros(shape=molecule.n_atoms, dtype=float)
            for atom_idx, charge in enumerate(molecule._partial_charges):
                charge_unitless = charge.value_in_unit(unit.elementary_charge)
                rdk_indexed_charges[atom_idx] = charge_unitless
            for atom_idx, rdk_atom in enumerate(rdmol.GetAtoms()):
                rdk_atom.SetDoubleProp("PartialCharge", rdk_indexed_charges[atom_idx])

            # Note: We could put this outside the "if" statement, which would result in all partial charges in the
            #       resulting file being set to "n/a" if they weren't set in the Open Force Field Toolkit ``Molecule``
            Chem.CreateAtomDoublePropertyList(rdmol, "PartialCharge")

        # Cleanup the rdmol
        rdmol.UpdatePropertyCache(strict=False)
        Chem.GetSSSR(rdmol)

        # Forcefully assign stereo information on the atoms that RDKit
        # can't figure out. This must be done last as calling AssignStereochemistry
        # again will delete these properties (see #196).
        for rdatom, stereochemistry in undefined_stereo_atoms.items():
            rdatom.SetProp("_CIPCode", stereochemistry)

        # Return non-editable version
        return Chem.Mol(rdmol)

    def to_inchi(self, molecule, fixed_hydrogens=False):
        """
        Create an InChI string for the molecule using the RDKit Toolkit.
        InChI is a standardised representation that does not capture tautomers
        unless specified using the fixed hydrogen layer.

        For information on InChi see here https://iupac.org/who-we-are/divisions/division-details/inchi/

        Parameters
        ----------
        molecule : An openff.toolkit.topology.Molecule
            The molecule to convert into a SMILES.

        fixed_hydrogens: bool, default=False
            If a fixed hydrogen layer should be added to the InChI, if `True` this will produce a
            non standard specific InChI string of the molecule.

        Returns
        --------
        inchi: str
            The InChI string of the molecule.
        """
        from rdkit import Chem

        rdmol = self.to_rdkit(molecule)
        if fixed_hydrogens:
            inchi = Chem.MolToInchi(rdmol, options="-FixedH")
        else:
            inchi = Chem.MolToInchi(rdmol)
        return inchi

    def to_inchikey(self, molecule, fixed_hydrogens=False):
        """
        Create an InChIKey for the molecule using the RDKit Toolkit.
        InChIKey is a standardised representation that does not capture tautomers
        unless specified using the fixed hydrogen layer.

        For information on InChi see here https://iupac.org/who-we-are/divisions/division-details/inchi/

        Parameters
        ----------
        molecule : An openff.toolkit.topology.Molecule
            The molecule to convert into a SMILES.

        fixed_hydrogens: bool, default=False
            If a fixed hydrogen layer should be added to the InChI, if `True` this will produce a
            non standard specific InChI string of the molecule.

        Returns
        --------
        inchi_key: str
            The InChIKey representation of the molecule.
""" from rdkit import Chem rdmol = self.to_rdkit(molecule) if fixed_hydrogens: inchi_key = Chem.MolToInchiKey(rdmol, options="-FixedH") else: inchi_key = Chem.MolToInchiKey(rdmol) return inchi_key def get_tagged_smarts_connectivity(self, smarts): """ Returns a tuple of tuples indicating connectivity between tagged atoms in a SMARTS string. Does not return bond order. Parameters ---------- smarts : str The tagged SMARTS to analyze Returns ------- unique_tags : tuple of int A sorted tuple of all unique tagged atom map indices. tagged_atom_connectivity : tuple of tuples of int, shape n_tagged_bonds x 2 A tuple of tuples, where each inner tuple is a pair of tagged atoms (tag_idx_1, tag_idx_2) which are bonded. The inner tuples are ordered smallest-to-largest, and the tuple of tuples is ordered lexically. So the return value for an improper torsion would be ((1, 2), (2, 3), (2, 4)). Raises ------ SMIRKSParsingError If RDKit was unable to parse the provided smirks/tagged smarts """ from rdkit import Chem from openff.toolkit.typing.chemistry import SMIRKSParsingError ss = Chem.MolFromSmarts(smarts) if ss is None: raise SMIRKSParsingError(f"RDKit was unable to parse SMIRKS {smarts}") unique_tags = set() connections = set() for at1 in ss.GetAtoms(): if at1.GetAtomMapNum() == 0: continue unique_tags.add(at1.GetAtomMapNum()) for at2 in at1.GetNeighbors(): if at2.GetAtomMapNum() == 0: continue cxn_to_add = sorted([at1.GetAtomMapNum(), at2.GetAtomMapNum()]) connections.add(tuple(cxn_to_add)) connections = tuple(sorted(list(connections))) unique_tags = tuple(sorted(list(unique_tags))) return unique_tags, connections @staticmethod def _find_smarts_matches(rdmol, smirks, aromaticity_model="OEAroModel_MDL"): """Find all sets of atoms in the provided RDKit molecule that match the provided SMARTS string. Parameters ---------- rdmol : rdkit.Chem.Mol rdmol to process with the SMIRKS in order to find matches smarts : str SMARTS string with any number of sequentially tagged atoms. 
If there are N tagged atoms numbered 1..N, the resulting matches will be N-tuples of atoms that match the corresponding tagged atoms. aromaticity_model : str, optional, default='OEAroModel_MDL' OpenEye aromaticity model designation as a string, such as ``OEAroModel_MDL``. Molecule is prepared with this aromaticity model prior to querying. Returns ------- matches : list of tuples of atoms indices within the ``rdmol`` matches[index] is an N-tuple of atom numbers from the ``rdmol`` Matches are returned in no guaranteed order. # TODO: What is returned if no matches are found? An empty list, or None? # TODO: Ensure that SMARTS numbers 1, 2, 3... are rendered into order of returnd matches indexed by 0, 1, 2... .. notes :: * Raises ``ValueError`` if ``smarts`` query is malformed """ from rdkit import Chem # Make a copy of the molecule rdmol = Chem.Mol(rdmol) # Use designated aromaticity model if aromaticity_model == "OEAroModel_MDL": Chem.SanitizeMol(rdmol, Chem.SANITIZE_ALL ^ Chem.SANITIZE_SETAROMATICITY) Chem.SetAromaticity(rdmol, Chem.AromaticityModel.AROMATICITY_MDL) else: # Only the OEAroModel_MDL is supported for now raise ValueError("Unknown aromaticity model: {}".aromaticity_models) # Set up query. 
qmol = Chem.MolFromSmarts(smirks) # cannot catch the error if qmol is None: raise ValueError( 'RDKit could not parse the SMIRKS string "{}"'.format(smirks) ) # Create atom mapping for query molecule idx_map = dict() for atom in qmol.GetAtoms(): smirks_index = atom.GetAtomMapNum() if smirks_index != 0: idx_map[smirks_index - 1] = atom.GetIdx() map_list = [idx_map[x] for x in sorted(idx_map)] # Perform matching matches = list() # choose the largest unsigned int without overflow # since the C++ signature is a uint max_matches = np.iinfo(np.uintc).max for match in rdmol.GetSubstructMatches( qmol, uniquify=False, maxMatches=max_matches, useChirality=True ): mas = [match[x] for x in map_list] matches.append(tuple(mas)) return matches def find_smarts_matches(self, molecule, smarts, aromaticity_model="OEAroModel_MDL"): """ Find all SMARTS matches for the specified molecule, using the specified aromaticity model. .. warning :: This API is experimental and subject to change. Parameters ---------- molecule : openff.toolkit.topology.Molecule The molecule for which all specified SMARTS matches are to be located smarts : str SMARTS string with optional SMIRKS-style atom tagging aromaticity_model : str, optional, default='OEAroModel_MDL' Molecule is prepared with this aromaticity model prior to querying. .. note :: Currently, the only supported ``aromaticity_model`` is ``OEAroModel_MDL`` """ rdmol = self.to_rdkit(molecule, aromaticity_model=aromaticity_model) return self._find_smarts_matches( rdmol, smarts, aromaticity_model="OEAroModel_MDL" ) # -------------------------------- # Stereochemistry RDKit utilities. # -------------------------------- def find_rings(self, molecule): """Find the rings in a given molecule. .. note :: For systems containing some special cases of connected rings, this function may not be well-behaved and may report a different number rings than expected. Some problematic cases include networks of many (5+) rings or bicyclic moieties (i.e. norbornane). 
Parameters ---------- molecule : openff.toolkit.topology.Molecule The molecule for which rings are to be found Returns ------- rings : tuple of tuples of atom indices Nested tuples, each containing the indices of atoms in each ring """ rdmol = molecule.to_rdkit() ring_info = rdmol.GetRingInfo() rings = ring_info.AtomRings() return rings @staticmethod def _find_undefined_stereo_atoms(rdmol, assign_stereo=False): """Find the chiral atoms with undefined stereochemsitry in the RDMol. Parameters ---------- rdmol : rdkit.RDMol The RDKit molecule. assign_stereo : bool, optional, default=False As a side effect, this function calls ``Chem.AssignStereochemistry()`` so by default we work on a molecule copy. Set this to ``True`` to avoid making a copy and assigning the stereochemistry to the Mol object. Returns ------- undefined_atom_indices : List[int] A list of atom indices that are chiral centers with undefined stereochemistry. See Also -------- rdkit.Chem.FindMolChiralCenters """ from rdkit import Chem if not assign_stereo: # Avoid modifying the original molecule. rdmol = copy.deepcopy(rdmol) # Flag possible chiral centers with the "_ChiralityPossible". Chem.AssignStereochemistry(rdmol, force=True, flagPossibleStereoCenters=True) # Find all atoms with undefined stereo. undefined_atom_indices = [] for atom_idx, atom in enumerate(rdmol.GetAtoms()): if atom.GetChiralTag() == Chem.ChiralType.CHI_UNSPECIFIED and atom.HasProp( "_ChiralityPossible" ): undefined_atom_indices.append(atom_idx) return undefined_atom_indices @staticmethod def _find_undefined_stereo_bonds(rdmol): """Find the chiral atoms with undefined stereochemsitry in the RDMol. Parameters ---------- rdmol : rdkit.RDMol The RDKit molecule. Returns ------- undefined_bond_indices : List[int] A list of bond indices with undefined stereochemistry. 
See Also -------- Chem.EnumerateStereoisomers._getFlippers Links ----- https://github.com/rdkit/rdkit/blob/master/Code/GraphMol/Chirality.cpp#L1509-L1515 This comment in FindPotentialStereoBonds mention that the method ignores ring bonds. https://github.com/DrrDom/rdk/blob/master/gen_stereo_rdkit3.py The function get_unspec_double_bonds() in this module looks like may solve the problem with the rings. """ from rdkit import Chem # Copy the molecule to avoid side effects. Chem.FindPotentialStereoBonds # assign Bond.STEREOANY to unspecific bond, which make subsequent calls # of Chem.AssignStereochemistry ignore the bond even if there are # ENDDOWNRIGHT/ENDUPRIGHT bond direction indications. rdmol_copy = copy.deepcopy(rdmol) # Clear any previous assignments on the bonds, since FindPotentialStereo may not overwrite it for bond in rdmol_copy.GetBonds(): bond.SetStereo(Chem.BondStereo.STEREONONE) # This function assigns Bond.GetStereo() == Bond.STEREOANY to bonds with # possible stereochemistry. Chem.FindPotentialStereoBonds(rdmol_copy, cleanIt=True) # Any TRULY stereogenic bonds in the molecule are now marked as STEREOANY in rdmol_copy. # Iterate through all the bonds, and for the ones where rdmol_copy is marked as STEREOANY, # ensure that they are cis/trans/E/Z (tested here be ensuring that they're NOT either # # of the other possible types (NONE or ANY)) undefined_bond_indices = [] for bond_idx, (orig_bond, repercieved_bond) in enumerate( zip(rdmol.GetBonds(), rdmol_copy.GetBonds()) ): # print(repercieved_bond.GetStereo(), orig_bond.GetStereo()) if (repercieved_bond.GetStereo() == Chem.BondStereo.STEREOANY) and ( (orig_bond.GetStereo() == Chem.BondStereo.STEREOANY) or (orig_bond.GetStereo() == Chem.BondStereo.STEREONONE) ): undefined_bond_indices.append(bond_idx) return undefined_bond_indices @classmethod def _detect_undefined_stereo(cls, rdmol, err_msg_prefix="", raise_warning=False): """Raise UndefinedStereochemistryError if the RDMol has undefined stereochemistry. 
Parameters ---------- rdmol : rdkit.Chem.Mol The RDKit molecule. err_msg_prefix : str, optional A string to prepend to the error/warning message. raise_warning : bool, optional, default=False If True, a warning is issued instead of an exception. Raises ------ UndefinedStereochemistryError If the RDMol has undefined atom or bond stereochemistry. """ # Find undefined atom/bond stereochemistry. undefined_atom_indices = cls._find_undefined_stereo_atoms(rdmol) undefined_bond_indices = cls._find_undefined_stereo_bonds(rdmol) # Build error message. if len(undefined_atom_indices) == 0 and len(undefined_bond_indices) == 0: msg = None else: msg = err_msg_prefix + "RDMol has unspecified stereochemistry. " # The "_Name" property is not always assigned. if rdmol.HasProp("_Name"): msg += "RDMol name: " + rdmol.GetProp("_Name") # Details about undefined atoms. if len(undefined_atom_indices) > 0: msg += "Undefined chiral centers are:\n" for undefined_atom_idx in undefined_atom_indices: msg += " - Atom {symbol} (index {index})\n".format( symbol=rdmol.GetAtomWithIdx(undefined_atom_idx).GetSymbol(), index=undefined_atom_idx, ) # Details about undefined bond. if len(undefined_bond_indices) > 0: msg += "Bonds with undefined stereochemistry are:\n" for undefined_bond_idx in undefined_bond_indices: bond = rdmol.GetBondWithIdx(undefined_bond_idx) atom1, atom2 = bond.GetBeginAtom(), bond.GetEndAtom() msg += " - Bond {bindex} (atoms {aindex1}-{aindex2} of element ({symbol1}-{symbol2})\n".format( bindex=undefined_bond_idx, aindex1=atom1.GetIdx(), aindex2=atom2.GetIdx(), symbol1=atom1.GetSymbol(), symbol2=atom2.GetSymbol(), ) if msg is not None: if raise_warning: msg = "Warning (not error because allow_undefined_stereo=True): " + msg logger.warning(msg) else: msg = "Unable to make OFFMol from RDMol: " + msg raise UndefinedStereochemistryError(msg) @staticmethod def _flip_rdbond_direction(rdbond, paired_rdbonds): """Flip the rdbond and all those paired to it. 
Parameters ---------- rdbond : rdkit.Chem.Bond The Bond whose direction needs to be flipped. paired_rdbonds : Dict[Tuple[int], List[rdkit.Chem.Bond]] Maps bond atom indices that are assigned a bond direction to the bonds on the other side of the double bond. """ from rdkit import Chem # The function assumes that all bonds are either up or down. supported_directions = {Chem.BondDir.ENDUPRIGHT, Chem.BondDir.ENDDOWNRIGHT} def _flip(b, paired, flipped, ignored): # The function assumes that all bonds are either up or down. assert b.GetBondDir() in supported_directions bond_atom_indices = (b.GetBeginAtomIdx(), b.GetEndAtomIdx()) # Check that we haven't flipped this bond already. if bond_atom_indices in flipped: # This should never happen. raise RuntimeError("Cannot flip the bond direction consistently.") # Flip the bond. if b.GetBondDir() == Chem.BondDir.ENDUPRIGHT: b.SetBondDir(Chem.BondDir.ENDDOWNRIGHT) else: b.SetBondDir(Chem.BondDir.ENDUPRIGHT) flipped.add(bond_atom_indices) # Flip all the paired bonds as well (if there are any). if bond_atom_indices in paired: for paired_rdbond in paired[bond_atom_indices]: # Don't flip the bond that was flipped in the upper-level recursion. if ( paired_rdbond.GetBeginAtomIdx(), paired_rdbond.GetEndAtomIdx(), ) != ignored: # Don't flip this bond in the next recursion. _flip(paired_rdbond, paired, flipped, ignored=bond_atom_indices) _flip(rdbond, paired_rdbonds, flipped=set(), ignored=None) @classmethod def _assign_rdmol_bonds_stereo(cls, offmol, rdmol): """Copy the info about bonds stereochemistry from the OFF Molecule to RDKit Mol.""" from rdkit import Chem # Map the bonds indices that are assigned bond direction # to the bond on the other side of the double bond. # (atom_index1, atom_index2) -> List[rdkit.Chem.Bond] paired_bonds = {} for bond in offmol.bonds: # No need to do anything with bonds without stereochemistry. if not bond.stereochemistry: continue # Isolate stereo RDKit bond object. 
rdbond_atom_indices = ( bond.atom1.molecule_atom_index, bond.atom2.molecule_atom_index, ) stereo_rdbond = rdmol.GetBondBetweenAtoms(*rdbond_atom_indices) # Collect all neighboring rdbonds of atom1 and atom2. neighbor_rdbonds1 = [ rdmol.GetBondBetweenAtoms( n.molecule_atom_index, bond.atom1.molecule_atom_index ) for n in bond.atom1.bonded_atoms if n != bond.atom2 ] neighbor_rdbonds2 = [ rdmol.GetBondBetweenAtoms( bond.atom2.molecule_atom_index, n.molecule_atom_index ) for n in bond.atom2.bonded_atoms if n != bond.atom1 ] # Select only 1 neighbor bond per atom out of the two. neighbor_rdbonds = [] for i, rdbonds in enumerate([neighbor_rdbonds1, neighbor_rdbonds2]): # If there are no neighbors for which we have already # assigned the bond direction, just pick the first one. neighbor_rdbonds.append(rdbonds[0]) # Otherwise, pick neighbor that was already assigned to # avoid inconsistencies and keep the tree non-cyclic. for rdb in rdbonds: if (rdb.GetBeginAtomIdx(), rdb.GetBeginAtomIdx()) in paired_bonds: neighbor_rdbonds[i] = rdb break # Assign a random direction to the bonds that were not already assigned # keeping track of which bond would be best to flip later (i.e. does that # are not already determining the stereochemistry of another double bond). flipped_rdbond = neighbor_rdbonds[0] for rdb in neighbor_rdbonds: if (rdb.GetBeginAtomIdx(), rdb.GetEndAtomIdx()) not in paired_bonds: rdb.SetBondDir(Chem.BondDir.ENDUPRIGHT) # Set this bond as a possible bond to flip. flipped_rdbond = rdb Chem.AssignStereochemistry(rdmol, cleanIt=True, force=True) # Verify that the current directions give us the desired stereochemistries. assert bond.stereochemistry in {"E", "Z"} if bond.stereochemistry == "E": desired_rdk_stereo_code = Chem.rdchem.BondStereo.STEREOE else: desired_rdk_stereo_code = Chem.rdchem.BondStereo.STEREOZ # If that doesn't work, flip the direction of one bond preferring # those that are not already determining the stereo of another bond. 
if stereo_rdbond.GetStereo() != desired_rdk_stereo_code: cls._flip_rdbond_direction(flipped_rdbond, paired_bonds) Chem.AssignStereochemistry(rdmol, cleanIt=True, force=True) # The stereo should be set correctly here. assert stereo_rdbond.GetStereo() == desired_rdk_stereo_code # Update paired bonds map. neighbor_bond_indices = [ (rdb.GetBeginAtomIdx(), rdb.GetEndAtomIdx()) for rdb in neighbor_rdbonds ] for i, bond_indices in enumerate(neighbor_bond_indices): try: paired_bonds[bond_indices].append(neighbor_rdbonds[1 - i]) except KeyError: paired_bonds[bond_indices] = [neighbor_rdbonds[1 - i]] class AmberToolsToolkitWrapper(ToolkitWrapper): """ AmberTools toolkit wrapper .. warning :: This API is experimental and subject to change. """ _toolkit_name = "AmberTools" _toolkit_installation_instructions = ( "The AmberTools toolkit (free and open source) can be found at " "https://anaconda.org/conda-forge/ambertools" ) def __init__(self): super().__init__() self._toolkit_file_read_formats = [] self._toolkit_file_write_formats = [] if not self.is_available(): raise ToolkitUnavailableException( f"The required toolkit {self._toolkit_name} is not " f"available. {self._toolkit_installation_instructions}" ) # TODO: More reliable way to extract AmberTools version out = subprocess.check_output(["antechamber", "-L"]) ambertools_version = out.decode("utf-8").split("\n")[1].split()[3].strip(":") self._toolkit_version = ambertools_version # TODO: Find AMBERHOME or executable home, checking miniconda if needed # Store an instance of an RDKitToolkitWrapper for file I/O self._rdkit_toolkit_wrapper = RDKitToolkitWrapper() @staticmethod def is_available(): """ Check whether the AmberTools toolkit is installed Returns ------- is_installed : bool True if AmberTools is installed, False otherwise. """ # TODO: Check all tools needed # TODO: How should we implement find_executable? 
        # antechamber must be discoverable on PATH for any AmberTools charge work.
        ANTECHAMBER_PATH = find_executable("antechamber")
        if ANTECHAMBER_PATH is None:
            return False
        # AmberToolsToolkitWrapper needs RDKit to do basically anything, since its interface requires SDF I/O
        if not (RDKitToolkitWrapper.is_available()):
            return False
        return True

    def assign_partial_charges(
        self,
        molecule,
        partial_charge_method=None,
        use_conformers=None,
        strict_n_conformers=False,
        _cls=None,
    ):
        """
        Compute partial charges with AmberTools using antechamber/sqm, and assign
        the new values to the partial_charges attribute.

        .. warning :: This API is experimental and subject to change.

        .. todo ::

           * Do we want to also allow ESP/RESP charges?

        Parameters
        ----------
        molecule : openff.toolkit.topology.Molecule
            Molecule for which partial charges are to be computed
        partial_charge_method : str, optional, default=None
            The charge model to use. One of ['gasteiger', 'am1bcc', 'am1-mulliken'].
            If None, 'am1-mulliken' will be used.
        use_conformers : iterable of simtk.unit.Quantity-wrapped numpy arrays, each with shape (n_atoms, 3) and dimension of distance. Optional, default = None
            List of (n_atoms x 3) simtk.unit.Quantities to use for partial charge
            calculation. If None, an appropriate number of conformers will be generated.
        strict_n_conformers : bool, default=False
            Whether to raise an exception if an invalid number of conformers is
            provided for the given charge method.
            If this is False and an invalid number of conformers is found, a
            warning will be raised. 
_cls : class Molecule constructor Raises ------ ChargeMethodUnavailableError if the requested charge method can not be handled by this toolkit ChargeCalculationError if the charge method is supported by this toolkit, but fails """ import os import subprocess from openff.toolkit.topology import Molecule if partial_charge_method is None: partial_charge_method = "am1-mulliken" else: # Standardize method name for string comparisons partial_charge_method = partial_charge_method.lower() SUPPORTED_CHARGE_METHODS = { "am1bcc": { "antechamber_keyword": "bcc", "min_confs": 1, "max_confs": 1, "rec_confs": 1, }, "am1-mulliken": { "antechamber_keyword": "mul", "min_confs": 1, "max_confs": 1, "rec_confs": 1, }, "gasteiger": { "antechamber_keyword": "gas", "min_confs": 0, "max_confs": 0, "rec_confs": 0, }, } if partial_charge_method not in SUPPORTED_CHARGE_METHODS: raise ChargeMethodUnavailableError( f"partial_charge_method '{partial_charge_method}' is not available from AmberToolsToolkitWrapper. " f"Available charge methods are {list(SUPPORTED_CHARGE_METHODS.keys())} " ) charge_method = SUPPORTED_CHARGE_METHODS[partial_charge_method] if _cls is None: from openff.toolkit.topology.molecule import Molecule _cls = Molecule # Make a temporary copy of the molecule, since we'll be messing with its conformers mol_copy = _cls(molecule) if use_conformers is None: if charge_method["rec_confs"] == 0: mol_copy._conformers = None else: mol_copy.generate_conformers( n_conformers=charge_method["rec_confs"], rms_cutoff=0.25 * unit.angstrom, toolkit_registry=RDKitToolkitWrapper(), ) # TODO: What's a "best practice" RMS cutoff to use here? 
else: mol_copy._conformers = None for conformer in use_conformers: mol_copy._add_conformer(conformer) self._check_n_conformers( mol_copy, partial_charge_method=partial_charge_method, min_confs=charge_method["min_confs"], max_confs=charge_method["max_confs"], strict_n_conformers=strict_n_conformers, ) # Find the path to antechamber # TODO: How should we implement find_executable? ANTECHAMBER_PATH = find_executable("antechamber") if ANTECHAMBER_PATH is None: raise AntechamberNotFoundError( "Antechamber not found, cannot run charge_mol()" ) # Compute charges with tempfile.TemporaryDirectory() as tmpdir: with temporary_cd(tmpdir): net_charge = mol_copy.total_charge / unit.elementary_charge # Write out molecule in SDF format ## TODO: How should we handle multiple conformers? self._rdkit_toolkit_wrapper.to_file( mol_copy, "molecule.sdf", file_format="sdf" ) # Compute desired charges # TODO: Add error handling if antechamber chokes short_charge_method = charge_method["antechamber_keyword"] subprocess.check_output( [ "antechamber", "-i", "molecule.sdf", "-fi", "sdf", "-o", "charged.mol2", "-fo", "mol2", "-pf", "yes", "-dr", "n", "-c", short_charge_method, "-nc", str(net_charge), ] ) # Write out just charges subprocess.check_output( [ "antechamber", "-dr", "n", "-i", "charged.mol2", "-fi", "mol2", "-o", "charges2.mol2", "-fo", "mol2", "-c", "wc", "-cf", "charges.txt", "-pf", "yes", ] ) # Check to ensure charges were actually produced if not os.path.exists("charges.txt"): # TODO: copy files into local directory to aid debugging? 
raise ChargeCalculationError( "Antechamber/sqm partial charge calculation failed on " "molecule {} (SMILES {})".format( molecule.name, molecule.to_smiles() ) ) # Read the charges with open("charges.txt", "r") as infile: contents = infile.read() text_charges = contents.split() charges = np.zeros([molecule.n_atoms], np.float64) for index, token in enumerate(text_charges): charges[index] = float(token) # TODO: Ensure that the atoms in charged.mol2 are in the same order as in molecule.sdf charges = unit.Quantity(charges, unit.elementary_charge) molecule.partial_charges = charges def compute_partial_charges_am1bcc( self, molecule, use_conformers=None, strict_n_conformers=False ): """ Compute partial charges with AmberTools using antechamber/sqm. This will calculate AM1-BCC charges on the first conformer only. .. warning :: This API is experimental and subject to change. Parameters ---------- molecule : Molecule Molecule for which partial charges are to be computed use_conformers : iterable of simtk.unit.Quantity-wrapped numpy arrays, each with shape (n_atoms, 3) and dimension of distance. Optional, default = None Coordinates to use for partial charge calculation. If None, an appropriate number of conformers will be generated. strict_n_conformers : bool, default=False Whether to raise an exception if an invalid number of conformers is provided. If this is False and an invalid number of conformers is found, a warning will be raised instead of an Exception. Returns ------- charges : numpy.array of shape (natoms) of type float The partial charges """ import warnings warnings.warn( "compute_partial_charges_am1bcc will be deprecated in an upcoming release. 
" "Use assign_partial_charges(partial_charge_method='am1bcc') instead.", DeprecationWarning, ) self.assign_partial_charges( molecule, partial_charge_method="AM1BCC", use_conformers=use_conformers, strict_n_conformers=strict_n_conformers, ) return molecule.partial_charges def _modify_sqm_in_to_request_bond_orders(self, file_path): """ Modify a sqm.in file produced by antechamber to include the "printbondorders=1" directive in the header. This method will overwrite the original file. Parameters ---------- file_path : str The path to sqm.in """ data = open(file_path).read() # Original sqm.in file headerlooks like: # Run semi-empirical minimization # &qmmm # qm_theory='AM1', grms_tol=0.0005, # scfconv=1.d-10, ndiis_attempts=700, qmcharge=0, # / # ... (atom coordinates in something like XYZ format) ... # To get WBOs, we need to add "printbondorders=1" to the list of keywords # First, split the sqm.in text at the "/" mark at the end of the header datasp = data.split("/") # Insert the "printbondorders" directive in a new line and re-add the "/" datasp.insert(1, "printbondorders=1, \n /") # Reassemble the file text new_data = "".join(datasp) # Write the new file contents, overwriting the original file. with open(file_path, "w") as of: of.write(new_data) def _get_fractional_bond_orders_from_sqm_out( self, file_path, validate_elements=None ): """ Process a SQM output file containing bond orders, and return a dict of the form dict[atom_1_index, atom_2_index] = fractional_bond_order Parameters ---------- file_path : str File path for sqm output file validate_elements : iterable of str The element symbols expected in molecule index order. A ValueError will be raised if the elements are not found in this order. Returns ------- bond_orders : dict[(int, int)]: float A dictionary where the keys are tuples of two atom indices and the values are floating-point bond orders. The keys are sorted in ascending order, such that the lower atom index is key[0] and the higher is key[1]. 
""" # Example sqm.out section with WBOs: # Bond Orders # # QMMM: NUM1 ELEM1 NUM2 ELEM2 BOND_ORDER # QMMM: 2 C 1 C 1.41107532 # QMMM: 3 C 1 C 1.41047804 # ... # QMMM: 15 H 13 H 0.00000954 # QMMM: 15 H 14 H 0.00000813 # # --------- Calculation Completed ---------- data = open(file_path).read() begin_sep = """ Bond Orders QMMM: NUM1 ELEM1 NUM2 ELEM2 BOND_ORDER """ end_sep = """ --------- Calculation Completed ---------- """ # Extract the chunk of text between begin_sep and end_sep, and split it by newline fbo_lines = data.split(begin_sep)[1].split(end_sep)[0].split("\n") # Iterate over the lines and populate the dict to return bond_orders = dict() for line in fbo_lines: linesp = line.split() atom_index_1 = int(linesp[1]) atom_element_1 = linesp[2] atom_index_2 = int(linesp[3]) atom_element_2 = linesp[4] bond_order = float(linesp[5]) # If validate_elements was provided, ensure that the ordering of element symbols is what we expected if validate_elements is not None: if (atom_element_1 != validate_elements[atom_index_1 - 1]) or ( atom_element_2 != validate_elements[atom_index_2 - 1] ): # raise ValueError('\n'.join(fbo_lines)) raise ValueError( f"Elements or indexing in sqm output differ from expectation. " f"Expected {validate_elements[atom_index_1]} with index {atom_index_1} and " f"{validate_elements[atom_index_2]} with index {atom_index_2}, " f"but SQM output has {atom_element_1} and {atom_element_2} for the same atoms." ) # To make lookup easier, we identify bonds as integer tuples with the lowest atom index # first and the highest second. index_tuple = tuple(sorted([atom_index_1, atom_index_2])) bond_orders[index_tuple] = bond_order return bond_orders def assign_fractional_bond_orders( self, molecule, bond_order_model=None, use_conformers=None, _cls=None ): """ Update and store list of bond orders this molecule. Bond orders are stored on each bond, in the `bond.fractional_bond_order` attribute. .. warning :: This API is experimental and subject to change. 
Parameters ---------- molecule : openff.toolkit.topology.molecule Molecule The molecule to assign wiberg bond orders to bond_order_model : str, optional, default=None The charge model to use. Only allowed value is 'am1-wiberg'. If None, 'am1-wiberg' will be used. use_conformers : iterable of simtk.unit.Quantity(np.array) with shape (n_atoms, 3) and dimension of distance, optional, default=None The conformers to use for fractional bond order calculation. If None, an appropriate number of conformers will be generated by an available ToolkitWrapper. _cls : class Molecule constructor """ from openff.toolkit.topology import Molecule # Find the path to antechamber # TODO: How should we implement find_executable? ANTECHAMBER_PATH = find_executable("antechamber") if ANTECHAMBER_PATH is None: raise AntechamberNotFoundError( "Antechamber not found, cannot run " "AmberToolsToolkitWrapper.assign_fractional_bond_orders()" ) if _cls is None: from openff.toolkit.topology.molecule import Molecule _cls = Molecule # Make a copy since we'll be messing with this molecule's conformers temp_mol = _cls(molecule) if use_conformers is None: temp_mol.generate_conformers( n_conformers=1, toolkit_registry=self._rdkit_toolkit_wrapper, ) else: temp_mol._conformers = None for conformer in use_conformers: temp_mol._add_conformer(conformer) if len(temp_mol.conformers) == 0: raise ValueError( "No conformers present in molecule submitted for fractional bond order calculation. 
Consider " "loading the molecule from a file with geometry already present or running " "molecule.generate_conformers() before calling molecule.assign_fractional_bond_orders" ) # Compute bond orders bond_order_model_to_antechamber_keyword = {"am1-wiberg": "mul"} supported_bond_order_models = list( bond_order_model_to_antechamber_keyword.keys() ) if bond_order_model is None: bond_order_model = "am1-wiberg" bond_order_model = bond_order_model.lower() if bond_order_model not in supported_bond_order_models: raise ValueError( f"Bond order model '{bond_order_model}' is not supported by AmberToolsToolkitWrapper. " f"Supported models are {supported_bond_order_models}" ) ac_charge_keyword = bond_order_model_to_antechamber_keyword[bond_order_model] bond_orders = defaultdict(list) for conformer in [*temp_mol.conformers]: with tempfile.TemporaryDirectory() as tmpdir: with temporary_cd(tmpdir): net_charge = temp_mol.total_charge # Write out molecule in SDF format temp_mol._conformers = [conformer] self._rdkit_toolkit_wrapper.to_file( temp_mol, "molecule.sdf", file_format="sdf" ) # Prepare sqm.in file as if we were going to run charge calc # TODO: Add error handling if antechamber chokes subprocess.check_output( [ "antechamber", "-i", "molecule.sdf", "-fi", "sdf", "-o", "sqm.in", "-fo", "sqmcrt", "-pf", "yes", "-c", ac_charge_keyword, "-nc", str(net_charge), ] ) # Modify sqm.in to request bond order calculation self._modify_sqm_in_to_request_bond_orders("sqm.in") # Run sqm to get bond orders subprocess.check_output( ["sqm", "-i", "sqm.in", "-o", "sqm.out", "-O"] ) # Ensure that antechamber/sqm did not change the indexing by checking against # an ordered list of element symbols for this molecule expected_elements = [at.element.symbol for at in molecule.atoms] conformer_bond_orders = ( self._get_fractional_bond_orders_from_sqm_out( "sqm.out", validate_elements=expected_elements ) ) for bond_indices, value in conformer_bond_orders.items(): bond_orders[bond_indices].append(value) # 
Note that sqm calculate WBOs for ALL PAIRS of atoms, not just those that have # bonds defined in the original molecule. So here we iterate over the bonds in # the original molecule and only nab the WBOs for those. for bond in molecule.bonds: # The atom index tuples that act as bond indices are ordered from lowest to highest by # _get_fractional_bond_orders_from_sqm_out, so here we make sure that we look them up in # sorted order as well sorted_atom_indices = sorted( tuple([bond.atom1_index + 1, bond.atom2_index + 1]) ) bond.fractional_bond_order = np.mean( bond_orders[tuple(sorted_atom_indices)] ) # ============================================================================================= # Toolkit registry # ============================================================================================= class ToolkitRegistry: """ Registry for ToolkitWrapper objects Examples -------- Register toolkits in a specified order, skipping if unavailable >>> from openff.toolkit.utils.toolkits import ToolkitRegistry >>> toolkit_precedence = [OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper] >>> toolkit_registry = ToolkitRegistry(toolkit_precedence) >>> toolkit_registry ToolkitRegistry containing OpenEye Toolkit, The RDKit, AmberTools Register all available toolkits (in the order OpenEye, RDKit, AmberTools, built-in) >>> toolkits = [OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, BuiltInToolkitWrapper] >>> toolkit_registry = ToolkitRegistry(toolkit_precedence=toolkits) >>> toolkit_registry ToolkitRegistry containing OpenEye Toolkit, The RDKit, AmberTools, Built-in Toolkit Retrieve the global singleton toolkit registry, which is created when this module is imported from all available toolkits: >>> from openff.toolkit.utils.toolkits import GLOBAL_TOOLKIT_REGISTRY as toolkit_registry >>> toolkit_registry ToolkitRegistry containing OpenEye Toolkit, The RDKit, AmberTools, Built-in Toolkit Note that this will contain different 
ToolkitWrapper objects based on what toolkits are currently installed. .. warning :: This API is experimental and subject to change. """ def __init__( self, toolkit_precedence=[], exception_if_unavailable=True, _register_imported_toolkit_wrappers=False, ): """ Create an empty toolkit registry. Parameters ---------- toolkit_precedence : list, default=[] List of toolkit wrapper classes, in order of desired precedence when performing molecule operations. If None, no toolkits will be registered. exception_if_unavailable : bool, optional, default=True If True, an exception will be raised if the toolkit is unavailable _register_imported_toolkit_wrappers : bool, optional, default=False If True, will attempt to register all imported ToolkitWrapper subclasses that can be found in the order of toolkit_precedence, if specified. If toolkit_precedence is not specified, the default order is [OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, BuiltInToolkitWrapper]. """ self._toolkits = list() toolkits_to_register = list() if _register_imported_toolkit_wrappers: if toolkit_precedence is None: toolkit_precedence = [ OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, BuiltInToolkitWrapper, ] all_importable_toolkit_wrappers = all_subclasses(ToolkitWrapper) for toolkit in toolkit_precedence: if toolkit in all_importable_toolkit_wrappers: toolkits_to_register.append(toolkit) else: if toolkit_precedence: toolkits_to_register = toolkit_precedence if toolkits_to_register: for toolkit in toolkits_to_register: self.register_toolkit( toolkit, exception_if_unavailable=exception_if_unavailable ) @property def registered_toolkits(self): """ List registered toolkits. .. warning :: This API is experimental and subject to change. .. todo :: Should this return a generator? Deep copies? Classes? Toolkit names? 
Returns ------- toolkits : iterable of toolkit objects """ return list(self._toolkits) @property def registered_toolkit_versions(self): """ Return a dict containing the version of each registered toolkit. .. warning :: This API is experimental and subject to change. Returns ------- toolkit_versions : dict[str, str] A dictionary mapping names and versions of wrapped toolkits """ return dict( (tk.toolkit_name, tk.toolkit_version) for tk in self.registered_toolkits ) def register_toolkit(self, toolkit_wrapper, exception_if_unavailable=True): """ Register the provided toolkit wrapper class, instantiating an object of it. .. warning :: This API is experimental and subject to change. .. todo :: This method should raise an exception if the toolkit is unavailable, unless an optional argument is specified that silently avoids registration of toolkits that are unavailable. Parameters ---------- toolkit_wrapper : instance or subclass of ToolkitWrapper The toolkit wrapper to register or its class. exception_if_unavailable : bool, optional, default=True If True, an exception will be raised if the toolkit is unavailable """ # Instantiate class if class, or just add if already instantiated. if isinstance(toolkit_wrapper, type): try: toolkit_wrapper = toolkit_wrapper() except ToolkitUnavailableException: msg = "Unable to load toolkit '{}'. ".format( toolkit_wrapper._toolkit_name ) if exception_if_unavailable: raise ToolkitUnavailableException(msg) else: if "OpenEye" in msg: msg += ( "The Open Force Field Toolkit does not require the OpenEye Toolkits, and can " "use RDKit/AmberTools instead. 
However, if you have a valid license for the " "OpenEye Toolkits, consider installing them for faster performance and additional " "file format support: " "https://docs.eyesopen.com/toolkits/python/quickstart-python/linuxosx.html " "OpenEye offers free Toolkit licenses for academics: " "https://www.eyesopen.com/academic-licensing" ) logger.warning(f"Warning: {msg}") return # Add toolkit to the registry. self._toolkits.append(toolkit_wrapper) def deregister_toolkit(self, toolkit_wrapper): """ Remove a ToolkitWrapper from the list of toolkits in this ToolkitRegistry .. warning :: This API is experimental and subject to change. Parameters ---------- toolkit_wrapper : instance or subclass of ToolkitWrapper The toolkit wrapper to remove from the registry Raises ------ InvalidToolkitError If toolkit_wrapper is not a ToolkitWrapper or subclass ToolkitUnavailableException If toolkit_wrapper is not found in the registry """ # If passed a class, instantiate it if inspect.isclass(toolkit_wrapper): toolkit_wrapper = toolkit_wrapper() if not isinstance(toolkit_wrapper, ToolkitWrapper): msg = ( f"Argument {toolkit_wrapper} must an ToolkitWrapper " f"or subclass of it. Found type {type(toolkit_wrapper)}." ) raise InvalidToolkitError(msg) toolkits_to_remove = [] for toolkit in self._toolkits: if type(toolkit) == type(toolkit_wrapper): toolkits_to_remove.append(toolkit) if not toolkits_to_remove: msg = ( f"Did not find {toolkit_wrapper} in registry. " f"Currently registered toolkits are {self._toolkits}" ) raise ToolkitUnavailableException(msg) for toolkit_to_remove in toolkits_to_remove: self._toolkits.remove(toolkit_to_remove) def add_toolkit(self, toolkit_wrapper): """ Append a ToolkitWrapper onto the list of toolkits in this ToolkitRegistry .. warning :: This API is experimental and subject to change. 
Parameters ---------- toolkit_wrapper : openff.toolkit.utils.ToolkitWrapper The ToolkitWrapper object to add to the list of registered toolkits Raises ------ InvalidToolkitError If toolkit_wrapper is not a ToolkitWrapper or subclass """ if not isinstance(toolkit_wrapper, ToolkitWrapper): msg = "Something other than a ToolkitWrapper object was passed to ToolkitRegistry.add_toolkit()\n" msg += "Given object {} of type {}".format( toolkit_wrapper, type(toolkit_wrapper) ) raise InvalidToolkitError(msg) self._toolkits.append(toolkit_wrapper) # TODO: Can we automatically resolve calls to methods that are not explicitly defined using some Python magic? def resolve(self, method_name): """ Resolve the requested method name by checking all registered toolkits in order of precedence for one that provides the requested method. Parameters ---------- method_name : str The name of the method to resolve Returns ------- method The method of the first registered toolkit that provides the requested method name Raises ------ NotImplementedError if the requested method cannot be found among the registered toolkits Examples -------- Create a molecule, and call the toolkit ``to_smiles()`` method directly >>> from openff.toolkit.topology import Molecule >>> molecule = Molecule.from_smiles('Cc1ccccc1') >>> toolkit_registry = ToolkitRegistry([OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper]) >>> method = toolkit_registry.resolve('to_smiles') >>> smiles = method(molecule) .. todo :: Is there a better way to figure out which toolkits implement given methods by introspection? """ for toolkit in self._toolkits: if hasattr(toolkit, method_name): method = getattr(toolkit, method_name) return method # No toolkit was found to provide the requested capability # TODO: Can we help developers by providing a check for typos in expected method names? 
msg = 'No registered toolkits can provide the capability "{}".\n'.format( method_name ) msg += "Available toolkits are: {}\n".format(self.registered_toolkits) raise NotImplementedError(msg) # TODO: Can we instead register available methods directly with `ToolkitRegistry`, so we can just use `ToolkitRegistry.method()`? def call(self, method_name, *args, raise_exception_types=None, **kwargs): """ Execute the requested method by attempting to use all registered toolkits in order of precedence. ``*args`` and ``**kwargs`` are passed to the desired method, and return values of the method are returned This is a convenient shorthand for ``toolkit_registry.resolve_method(method_name)(*args, **kwargs)`` Parameters ---------- method_name : str The name of the method to execute raise_exception_types : list of Exception subclasses, default=None A list of exception-derived types to catch and raise immediately. If None, this will be set to [Exception], which will raise an error immediately if the first ToolkitWrapper in the registry fails. To try each ToolkitWrapper that provides a suitably-named method, set this to the empty list ([]). If all ToolkitWrappers run without raising any exceptions in this list, a single ValueError will be raised containing the each ToolkitWrapper that was tried and the exception it raised. Raises ------ NotImplementedError if the requested method cannot be found among the registered toolkits ValueError if no exceptions in the raise_exception_types list were raised by ToolkitWrappers, and all ToolkitWrappers in the ToolkitRegistry were tried. Other forms of exceptions are possible if raise_exception_types is specified. These are defined by the ToolkitWrapper method being called. 
Examples -------- Create a molecule, and call the toolkit ``to_smiles()`` method directly >>> from openff.toolkit.topology import Molecule >>> molecule = Molecule.from_smiles('Cc1ccccc1') >>> toolkit_registry = ToolkitRegistry([OpenEyeToolkitWrapper, RDKitToolkitWrapper]) >>> smiles = toolkit_registry.call('to_smiles', molecule) """ if raise_exception_types is None: raise_exception_types = [Exception] errors = list() for toolkit in self._toolkits: if hasattr(toolkit, method_name): method = getattr(toolkit, method_name) try: return method(*args, **kwargs) except Exception as e: for exception_type in raise_exception_types: if isinstance(e, exception_type): raise e errors.append((toolkit, e)) # No toolkit was found to provide the requested capability # TODO: Can we help developers by providing a check for typos in expected method names? msg = ( f'No registered toolkits can provide the capability "{method_name}" ' f'for args "{args}" and kwargs "{kwargs}"\n' ) msg += "Available toolkits are: {}\n".format(self.registered_toolkits) # Append information about toolkits that implemented the method, but could not handle the provided parameters for toolkit, error in errors: msg += " {} {} : {}\n".format(toolkit, type(error), error) raise ValueError(msg) def __repr__(self): return f"ToolkitRegistry containing " + ", ".join( [tk.toolkit_name for tk in self._toolkits] ) # ============================================================================================= # GLOBAL TOOLKIT REGISTRY # ============================================================================================= # Create global toolkit registry, where all available toolkits are registered GLOBAL_TOOLKIT_REGISTRY = ToolkitRegistry( toolkit_precedence=[ OpenEyeToolkitWrapper, RDKitToolkitWrapper, AmberToolsToolkitWrapper, BuiltInToolkitWrapper, ], exception_if_unavailable=False, ) # ============================================================================================= # SET GLOBAL TOOLKIT-AVAIABLE 
VARIABLES # ============================================================================================= OPENEYE_AVAILABLE = False RDKIT_AVAILABLE = False AMBERTOOLS_AVAILABLE = False # Only available toolkits will have made it into the GLOBAL_TOOLKIT_REGISTRY for toolkit in GLOBAL_TOOLKIT_REGISTRY.registered_toolkits: if type(toolkit) is OpenEyeToolkitWrapper: OPENEYE_AVAILABLE = True elif type(toolkit) is RDKitToolkitWrapper: RDKIT_AVAILABLE = True elif type(toolkit) is AmberToolsToolkitWrapper: AMBERTOOLS_AVAILABLE = True # ============================================================================================= # WARN IF INSUFFICIENT TOOLKITS INSTALLED # ============================================================================================= # Define basic toolkits that handle essential file I/O BASIC_CHEMINFORMATICS_TOOLKITS = [RDKitToolkitWrapper, OpenEyeToolkitWrapper] # Ensure we have at least one basic toolkit if ( sum( [ tk.is_available() for tk in GLOBAL_TOOLKIT_REGISTRY.registered_toolkits if type(tk) in BASIC_CHEMINFORMATICS_TOOLKITS ] ) == 0 ): msg = "WARNING: No basic cheminformatics toolkits are available.\n" msg += "At least one basic toolkit is required to handle SMARTS matching and file I/O. \n" msg += "Please install at least one of the following basic toolkits:\n" for wrapper in all_subclasses(ToolkitWrapper): if wrapper.toolkit_name is not None: msg += "{} : {}\n".format( wrapper._toolkit_name, wrapper._toolkit_installation_instructions ) print(msg)
import sys, mood_db
import matplotlib.pyplot as plt
import mplcursors
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtGui import QIcon
from pathlib import Path
from datetime import datetime

cur_dir = Path.cwd()


class Canvas(FigureCanvas):
    """Matplotlib canvas showing a bar chart of daily mood ratings for one month."""

    def __init__(self, parent, date):
        self.fig, self.ax = plt.subplots(figsize=(10, 4))
        super().__init__(self.fig)
        self.setParent(parent)

        # Each DB row is (rating, description, day) -- TODO confirm against mood_db.
        self.values = mood_db.show_values(date)
        self.x = [x[2] for x in self.values]
        self.y = [y[0] for y in self.values]
        self.descriptions = [d[1] for d in self.values]

        self.ax1 = plt.subplot2grid((1, 1), (0, 0))
        self.lines = self.ax1.bar(
            self.x, self.y, color="lightsteelblue", edgecolor="black", width=0.95
        )

        for label in self.ax1.xaxis.get_ticklabels():
            label.set_rotation(45)

        self.ax1.tick_params(axis="x", colors="tab:blue")
        self.ax1.tick_params(axis="y", colors="tab:blue")
        self.ax1.xaxis.label.set_color("tab:blue")
        plt.xlabel("Days (Year - Month - Day)")
        self.ax1.yaxis.label.set_color("tab:blue")
        plt.ylabel("Mood Rating")

        date = datetime.strptime(date, "%Y-%m")
        # BUG FIX: the original nested double quotes inside a double-quoted
        # f-string (strftime("%B %Y")), which is a SyntaxError on Python < 3.12.
        plt.title(f"Mood Rating Graph for {date.strftime('%B %Y')}")
        plt.yticks([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])  # Only shows the available Y values
        plt.subplots_adjust(left=0.060, bottom=0.250, right=0.990, top=0.922)

        # Cursor hover annotations: show the mood description for each day's bar.
        cursor = mplcursors.cursor(self.lines, hover=mplcursors.HoverMode.Transient)
        cursor.connect(
            "add",
            lambda sel: sel.annotation.set_text(self.descriptions[sel.target.index]))


class AppWindow(QWidget):
    """Fixed-size (1000x400) window hosting the mood rating graph."""

    def __init__(self, date):
        super().__init__()
        self.resize(1000, 400)
        self.setMaximumSize(1000, 400)
        self.setMinimumSize(1000, 400)
        self.setWindowTitle("Your Mood Rating Graph")
        self.setWindowIcon(QIcon((cur_dir / "test/icon.png").as_posix()))
        self.graph = Canvas(self, date)


if __name__ == "__main__":
    print("Name Main")
    app = QApplication(sys.argv)
    graph = AppWindow("2021-10")
    graph.show()
    sys.exit(app.exec_())
import sys, mood_db
import matplotlib.pyplot as plt
import mplcursors
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtGui import QIcon
from pathlib import Path
from datetime import datetime

cur_dir = Path.cwd()


class Canvas(FigureCanvas):
    """Bar-chart canvas of one month's daily mood ratings, embedded in a Qt parent."""

    def __init__(self, parent, date):
        self.fig, self.ax = plt.subplots(figsize=(10, 4))
        super().__init__(self.fig)
        self.setParent(parent)

        # Pull this month's rows and split them into plotting columns.
        self.values = mood_db.show_values(date)
        self.x = [row[2] for row in self.values]
        self.y = [row[0] for row in self.values]
        self.descriptions = [row[1] for row in self.values]

        self.ax1 = plt.subplot2grid((1, 1), (0, 0))
        self.lines = self.ax1.bar(
            self.x,
            self.y,
            color="lightsteelblue",
            edgecolor="black",
            width=0.95,
        )

        # Slant the day labels so they do not collide.
        for tick_label in self.ax1.xaxis.get_ticklabels():
            tick_label.set_rotation(45)

        # Blue theme for both axes, their ticks and their titles.
        for axis_name in ("x", "y"):
            self.ax1.tick_params(axis=axis_name, colors="tab:blue")
        self.ax1.xaxis.label.set_color("tab:blue")
        plt.xlabel("Days (Year - Month - Day)")
        self.ax1.yaxis.label.set_color("tab:blue")
        plt.ylabel("Mood Rating")

        date = datetime.strptime(date, "%Y-%m")
        month_label = date.strftime('%B %Y')
        plt.title(f"Mood Rating Graph for {month_label}")
        plt.yticks(list(range(1, 11)))  # restrict ticks to the valid ratings 1..10
        plt.subplots_adjust(left=0.060, bottom=0.250, right=0.990, top=0.922)

        # Hovering a bar shows that day's mood description as an annotation.
        hover_cursor = mplcursors.cursor(
            self.lines, hover=mplcursors.HoverMode.Transient
        )
        hover_cursor.connect(
            "add",
            lambda sel: sel.annotation.set_text(
                self.descriptions[sel.target.index]
            ),
        )


class AppWindow(QWidget):
    """Top-level window with a fixed 1000x400 geometry that holds the graph."""

    def __init__(self, date):
        super().__init__()
        self.resize(1000, 400)
        self.setMaximumSize(1000, 400)
        self.setMinimumSize(1000, 400)
        self.setWindowTitle(f"Your Mood Rating Graph")
        self.setWindowIcon(QIcon((cur_dir / "test/icon.png").as_posix()))
        self.graph = Canvas(self, date)


if __name__ == "__main__":
    print("Name Main")
    app = QApplication(sys.argv)
    graph = AppWindow("2021-10")
    graph.show()
    sys.exit(app.exec_())
# Copyright 2020- Robot Framework Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import re import shutil import string import sys import time from concurrent.futures._base import Future from datetime import timedelta from pathlib import Path from typing import Dict, List, Optional, Set, Union from assertionengine import AssertionOperator from overrides import overrides from robot.libraries.BuiltIn import EXECUTION_CONTEXTS, BuiltIn # type: ignore from robot.result.model import TestCase as TestCaseResult # type: ignore from robot.running.model import TestCase as TestCaseRunning # type: ignore from robot.utils import secs_to_timestr, timestr_to_secs # type: ignore from robotlibcore import DynamicCore # type: ignore from .base import ContextCache, LibraryComponent from .generated.playwright_pb2 import Request from .keywords import ( Control, Cookie, Devices, Evaluation, Getters, Interaction, Network, PlaywrightState, Promises, RunOnFailureKeywords, Waiter, WebAppState, ) from .playwright import Playwright from .utils import AutoClosingLevel, is_falsy, is_same_keyword, keyword, logger # Importing this directly from .utils break the stub type checks from .utils.data_types import DelayedKeyword, SupportedBrowsers from .version import __version__ as VERSION class Browser(DynamicCore): """Browser library is a browser automation library for Robot Framework. This is the keyword documentation for Browser library. 
For information about installation, support, and more please visit the [https://github.com/MarketSquare/robotframework-playwright|project pages]. For more information about Robot Framework itself, see [https://robotframework.org|robotframework.org]. Browser library uses [https://github.com/microsoft/playwright|Playwright Node module] to automate [https://www.chromium.org/Home|Chromium], [https://www.mozilla.org/en-US/firefox/new/|Firefox] and [https://webkit.org/|WebKit] with a single library. == Table of contents == %TOC% = Browser, Context and Page = Browser library works with three different layers that build on each other: *Browser*, *Context* and *Page*. == Browsers == A *browser* can be started with one of the three different engines Chromium, Firefox or Webkit. === Supported Browsers === | Browser | Browser with this engine | | ``chromium`` | Google Chrome, Microsoft Edge (since 2020), Opera | | ``firefox`` | Mozilla Firefox | | ``webkit`` | Apple Safari, Mail, AppStore on MacOS and iOS | Since [https://github.com/microsoft/playwright|Playwright] comes with a pack of builtin binaries for all browsers, no additional drivers e.g. geckodriver are needed. All these browsers that cover more than 85% of the world wide used browsers, can be tested on Windows, Linux and MacOS. Theres is not need for dedicated machines anymore. A browser process is started ``headless`` (without a GUI) by default. Run `New Browser` with specified arguments if a browser with a GUI is requested or if a proxy has to be configured. A browser process can contain several contexts. == Contexts == A *context* corresponds to set of independent incognito pages in a browser that share cookies, sessions or profile settings. Pages in two separate contexts do not share cookies, sessions or profile settings. Compared to Selenium, these do *not* require their own browser process. To get a clean environment a test can just open a new context. 
Due to this new independent browser sessions can be opened with Robot Framework Browser about 10 times faster than with Selenium by just opening a `New Context` within the opened browser. The context layer is useful e.g. for testing different users sessions on the same webpage without opening a whole new browser context. Contexts can also have detailed configurations, such as geo-location, language settings, the viewport size or color scheme. Contexts do also support http credentials to be set, so that basic authentication can also be tested. To be able to download files within the test, the ``acceptDownloads`` argument must be set to ``True`` in `New Context` keyword. A context can contain different pages. == Pages == A *page* does contain the content of the loaded web site and has a browsing history. Pages and browser tabs are the same. Typical usage could be: | *** Test Cases *** | Starting a browser with a page | New Browser chromium headless=false | New Context viewport={'width': 1920, 'height': 1080} | New Page https://marketsquare.github.io/robotframework-browser/Browser.html | Get Title == Browser The `Open Browser` keyword opens a new browser, a new context and a new page. This keyword is useful for quick experiments or debugging sessions. When a `New Page` is called without an open browser, `New Browser` and `New Context` are executed with default values first. Each Browser, Context and Page has a unique ID with which they can be addressed. A full catalog of what is open can be received by `Get Browser Catalog` as dictionary. = Finding elements = All keywords in the library that need to interact with an element on a web page take an argument typically named ``selector`` that specifies how to find the element. Selector strategies that are supported by default are listed in the table below. | = Strategy = | = Match based on = | = Example = | | ``css`` | CSS selector. | ``css=.class > #login_btn`` | | ``xpath`` | XPath expression. 
| ``xpath=//input[@id="login_btn"]`` | | ``text`` | Browser text engine. | ``text=Login`` | | ``id`` | Element ID Attribute. | ``id=login_btn`` | == Explicit Selector Strategy == The explicit selector strategy is specified with a prefix using syntax ``strategy=value``. Spaces around the separator are ignored, so ``css=foo``, ``css= foo`` and ``css = foo`` are all equivalent. == Implicit Selector Strategy == *The default selector strategy is `css`.* If selector does not contain one of the know explicit selector strategies, it is assumed to contain css selector. Selectors that are starting with ``//`` or ``..`` are considered as xpath selectors. Selectors that are in quotes are considered as text selectors. Examples: | # CSS selectors are default. | `Click` span > button.some_class # This is equivalent | `Click` css=span > button.some_class # to this. | | # // or .. leads to xpath selector strategy | `Click` //span/button[@class="some_class"] | `Click` xpath=//span/button[@class="some_class"] | | # "text" in quotes leads to exact text selector strategy | `Click` "Login" | `Click` text="Login" == CSS == As written before, the default selector strategy is `css`. See [https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors | css selector] for more information. Any malformed selector not starting with ``//`` or ``..`` nor starting and ending with a quote is assumed to be a css selector. Example: | `Click` span > button.some_class == XPath == XPath engine is equivalent to [https://developer.mozilla.org/en/docs/Web/API/Document/evaluate|Document.evaluate]. Example: ``xpath=//html/body//span[text()="Hello World"]``. Malformed selector starting with ``//`` or ``..`` is assumed to be an xpath selector. For example, ``//html/body`` is converted to ``xpath=//html/body``. More examples are displayed in `Examples`. Note that xpath does not pierce [https://developer.mozilla.org/en-US/docs/Web/Web_Components/Using_shadow_DOM|shadow_roots]. 
== Text == Text engine finds an element that contains a text node with the passed text. For example, ``Click text=Login`` clicks on a login button, and ``Wait For Elements State text="lazy loaded text"`` waits for the "lazy loaded text" to appear in the page. Text engine finds fields based on their labels in text inserting keywords. Malformed selector starting and ending with a quote (either ``"`` or ``'``) is assumed to be a text selector. For example, ``Click "Login"`` is converted to ``Click text="Login"``. Be aware that these leads to exact matches only! More examples are displayed in `Examples`. === Insensitive match === By default, the match is case-insensitive, ignores leading/trailing whitespace and searches for a substring. This means ``text= Login`` matches ``<button>Button loGIN (click me)</button>``. === Exact match === Text body can be escaped with single or double quotes for precise matching, insisting on exact match, including specified whitespace and case. This means ``text="Login "`` will only match ``<button>Login </button>`` with exactly one space after "Login". Quoted text follows the usual escaping rules, e.g. use ``\\"`` to escape double quote in a double-quoted string: ``text="foo\\"bar"``. === RegEx === Text body can also be a JavaScript-like regex wrapped in / symbols. This means ``text=/^hello .*!$/i`` or ``text=/^Hello .*!$/`` will match ``<span>Hello Peter Parker!</span>`` with any name after ``Hello``, ending with ``!``. The first one flagged with ``i`` for case-insensitive. See [https://regex101.com/|https://regex101.com] for more information about RegEx. === Button and Submit Values === Input elements of the type button and submit are rendered with their value as text, and text engine finds them. For example, ``text=Login`` matches ``<input type=button value="Login">``. == Cascaded selector syntax == Browser library supports the same selector strategies as the underlying Playwright node module: xpath, css, id and text. 
The strategy can either be explicitly specified with a prefix or the strategy can be implicit. A major advantage of Browser is, that multiple selector engines can be used within one selector. It is possible to mix XPath, CSS and Text selectors while selecting a single element. Selectors are strings that consists of one or more clauses separated by ``>>`` token, e.g. ``clause1 >> clause2 >> clause3``. When multiple clauses are present, next one is queried relative to the previous one's result. Browser library supports concatination of different selectors seperated by ``>>``. For example: | `Highlight Elements` "Hello" >> ../.. >> .select_button | `Highlight Elements` text=Hello >> xpath=../.. >> css=.select_button Each clause contains a selector engine name and selector body, e.g. ``engine=body``. Here ``engine`` is one of the supported engines (e.g. css or a custom one). Selector ``body`` follows the format of the particular engine, e.g. for css engine it should be a [https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors | css selector]. Body format is assumed to ignore leading and trailing white spaces, so that extra whitespace can be added for readability. If selector engine needs to include ``>>`` in the body, it should be escaped inside a string to not be confused with clause separator, e.g. ``text="some >> text"``. Selector engine name can be prefixed with ``*`` to capture element that matches the particular clause instead of the last one. For example, ``css=article >> text=Hello`` captures the element with the text ``Hello``, and ``*css=article >> text=Hello`` (note the *) captures the article element that contains some element with the text Hello. For convenience, selectors in the wrong format are heuristically converted to the right format. 
See `Implicit Selector Strategy` == Examples == | # queries 'div' css selector | Get Element css=div | | # queries '//html/body/div' xpath selector | Get Element //html/body/div | | # queries '"foo"' text selector | Get Element text=foo | | # queries 'span' css selector inside the result of '//html/body/div' xpath selector | Get Element xpath=//html/body/div >> css=span | | # converted to 'css=div' | Get Element div | | # converted to 'xpath=//html/body/div' | Get Element //html/body/div | | # converted to 'text="foo"' | Get Element "foo" | | # queries the div element of every 2nd span element inside an element with the id foo | Get Element \\#foo >> css=span:nth-child(2n+1) >> div | Get Element id=foo >> css=span:nth-child(2n+1) >> div Be aware that using ``#`` as a starting character in Robot Framework would be interpreted as comment. Due to that fact a ``#id`` must be escaped as ``\\#id``. == Frames == By default, selector chains do not cross frame boundaries. It means that a simple CSS selector is not able to select and element located inside an iframe or a frameset. For this usecase, there is a special selector ``>>>`` which can be used to combine a selector for the frame and a selector for an element inside a frame. Given this simple pseudo html snippet: | <iframe id="iframe" src="src.html"> | #document | <!DOCTYPE html> | <html> | <head></head> | <body> | <button id="btn">Click Me</button> | </body> | </html> | </iframe> Here's a keyword call that clicks the button inside the frame. | Click id=iframe >>> id=btn The selectors on the left and right side of ``>>>`` can be any valid selectors. The selector clause directly before the frame opener ``>>>`` must select the frame element. == WebComponents and Shadow DOM == Playwright and so also Browser are able to do automatic piercing of Shadow DOMs and therefore are the best automation technology when working with WebComponents. 
Also other technologies claim that they can handle [https://developer.mozilla.org/en-US/docs/Web/Web_Components/Using_shadow_DOM|Shadow DOM and Web Components]. However, non of them do pierce shadow roots automatically, which may be inconvenient when working with Shadow DOM and Web Components. For that reason, css engine pierces shadow roots. More specifically, every [https://developer.mozilla.org/en-US/docs/Web/CSS/Descendant_combinator|Descendant combinator] pierces an arbitrary number of open shadow roots, including the implicit descendant combinator at the start of the selector. That means, it is not nessesary to select each shadow host, open its shadow root and select the next shadow host until you reach the element that should be controlled. === CSS:light === ``css:light`` engine is equivalent to [https://developer.mozilla.org/en/docs/Web/API/Document/querySelector | Document.querySelector] and behaves according to the CSS spec. However, it does not pierce shadow roots. ``css`` engine first searches for elements in the light dom in the iteration order, and then recursively inside open shadow roots in the iteration order. It does not search inside closed shadow roots or iframes. Examples: | <article> | <div>In the light dom</div> | <div slot='myslot'>In the light dom, but goes into the shadow slot</div> | <open mode shadow root> | <div class='in-the-shadow'> | <span class='content'> | In the shadow dom | <open mode shadow root> | <li id='target'>Deep in the shadow</li> | </open mode shadow root> | </span> | </div> | <slot name='myslot'></slot> | </open mode shadow root> | </article> Note that ``<open mode shadow root>`` is not an html element, but rather a shadow root created with ``element.attachShadow({mode: 'open'})``. - Both ``"css=article div"`` and ``"css:light=article div"`` match the first ``<div>In the light dom</div>``. 
- Both ``"css=article > div"`` and ``"css:light=article > div"`` match two ``div`` elements that are direct children of the ``article``. - ``"css=article .in-the-shadow"`` matches the ``<div class='in-the-shadow'>``, piercing the shadow root, while ``"css:light=article .in-the-shadow"`` does not match anything. - ``"css:light=article div > span"`` does not match anything, because both light-dom ``div`` elements do not contain a ``span``. - ``"css=article div > span"`` matches the ``<span class='content'>``, piercing the shadow root. - ``"css=article > .in-the-shadow"`` does not match anything, because ``<div class='in-the-shadow'>`` is not a direct child of ``article`` - ``"css:light=article > .in-the-shadow"`` does not match anything. - ``"css=article li#target"`` matches the ``<li id='target'>Deep in the shadow</li>``, piercing two shadow roots. === text:light === ``text`` engine open pierces shadow roots similarly to ``css``, while ``text:light`` does not. Text engine first searches for elements in the light dom in the iteration order, and then recursively inside open shadow roots in the iteration order. It does not search inside closed shadow roots or iframes. === id, data-testid, data-test-id, data-test and their :light counterparts === Attribute engines are selecting based on the corresponding attribute value. For example: ``data-test-id=foo`` is equivalent to ``css=[data-test-id="foo"]``, and ``id:light=foo`` is equivalent to ``css:light=[id="foo"]``. == Element reference syntax == It is possible to get a reference to an element by using `Get Element` keyword. This reference can be used as a *first* part of a selector by using a special selector syntax `element=` like this: | ${ref}= Get Element .some_class | Click element=${ref} >> .some_child The `.some_child` selector in the example is relative to the element referenced by ${ref}. 
= Assertions = Keywords that accept arguments ``assertion_operator`` <`AssertionOperator`> and ``assertion_expected`` can optionally assert. %ASSERTION_TABLE% But default the keywords will provide an error message if the assertion fails, but default error message can be overwritten with a ``message`` argument. The ``message`` argument accepts `{value}`, `{value_type}`, `{expected}` and `{expected_type}` [https://docs.python.org/3/library/stdtypes.html#str.format|format] options. The `{value}` is the value returned by the keyword and the `{expected}` is the expected value defined by the user, usually value in the ``assertion_expected`` argument. The `{value_type}` and `{expected_type}` are the type definitions from `{value}` and `{expected}` arguments. In similar fashion as Python [https://docs.python.org/3/library/functions.html#type|type] returns type definition. Assertions will retry until ``timeout`` has expired if they do not pass. The assertion ``assertion_expected`` value is not converted by the library and is used as is. Therefore when assertion is made, the ``assertion_expected`` argument value and value returned the keyword must have same type. If types are not same, assertion will fail. Example `Get Text` always returns a string and has to be compared with a string, even the returnd value might look like a number. Other Keywords have other specific types they return. `Get Element Count` always returns an integer. `Get Bounding Box` and `Get Viewport Size` can be filtered. They return a dictionary without filter and a number when filtered. These Keywords do autoconvert the expected value if a number is returned. * < less or greater > With Strings* Compairisons of strings with ``greater than`` or ``less than`` compares each character, starting from 0 reagarding where it stands in the code page. Example: ``A < Z``, ``Z < a``, ``ac < dc` It does never compare the length of elements. Neither lists nor strings. 
The comparison stops at the first character that is different. Examples: ``'abcde' < 'abd'``, ``'100.000' < '2'`` In Python 3 and therefore also in Browser it is not possible to compare numbers with strings with a greater or less operator. On keywords that return numbers, the given expected value is automatically converted to a number before comparison. The getters `Get Page State` and `Get Browser Catalog` return a dictionary. Values of the dictionary can directly asserted. Pay attention of possible types because they are evaluated in Python. For example: | Get Page State validate 2020 >= value['year'] # Compairsion of numbers | Get Page State validate "IMPORTANT MESSAGE!" == value['message'] # Compairsion of strings == The 'then' or 'evaluate' closure == Keywords that accept arguments ``assertion_operator`` and ``assertion_expected`` can optionally also use ``then`` or ``evaluate`` closure to modify the returned value with BuiltIn Evaluate. Actual value can be accessed with ``value``. For example ``Get Title then 'TITLE: '+value``. See [https://robotframework.org/robotframework/latest/libraries/BuiltIn.html#Evaluating%20expressions| Builtin Evaluating expressions] for more info on the syntax. == Examples == | # *Keyword* *Selector* *Key* *Assertion Operator* *Assertion Expected* | Get Title equal Page Title | Get Title ^= Page | Get Style //*[@id="div-element"] width > 100 | Get Title matches \\\\w+\\\\s\\\\w+ | Get Title validate value == "Login Page" | Get Title evaluate value if value == "some value" else "something else" = Automatic page and context closing = %AUTO_CLOSING_LEVEL% = Experimental: Re-using same node process = Browser library integrated nodejs and python. NodeJS side can be also executed as a standalone process. Browser libraries running on the same machine can talk to that instead of starting new node processes. This can speed execution when running tests parallel. 
To start node side run on the directory when Browser package is ``PLAYWRIGHT_BROWSERS_PATH=0 node Browser/wrapper/index.js PORT``. ``PORT`` is port you want to use for the node process. To execute tests then with pabot for example do ``ROBOT_FRAMEWORK_BROWSER_NODE_PORT=PORT pabot ..``. = Extending Browser library with a JavaScript module = Browser library can be extended with JavaScript. Module must be in CommonJS format that Node.js uses. You can translate your ES6 module to Node.js CommonJS style with Babel. Many other languages can be also translated to modules that can be used from Node.js. For example TypeScript, PureScript and ClojureScript just to mention few. | async function myGoToKeyword(page, args, logger, playwright) { | logger(args.toString()) | playwright.coolNewFeature() | return await page.goto(args[0]); | } ``page``: [https://playwright.dev/docs/api/class-page|the playwright Page object]. ``args``: list of strings from Robot Framework keyword call. !! A BIT UNSTABLE AND SUBJECT TO API CHANGES !! ``logger``: callback function that takes strings as arguments and writes them to robot log. Can be called multiple times. ``playwright``: playwright module (* from 'playwright'). Useful for integrating with Playwright features that Browser library doesn't support with it's own keywords. 
[https://playwright.dev/docs/api/class-playwright| API docs] == Example module.js == | async function myGoToKeyword(page, args) { | await page.goto(args[0]); | return await page.title(); | } | exports.__esModule = true; | exports.myGoToKeyword = myGoToKeyword; == Example Robot Framework side == | *** Settings *** | Library Browser jsextension=${CURDIR}/module.js | | *** Test Cases *** | Hello | New Page | ${title}= myGoToKeyword https://playwright.dev | Should be equal ${title} Playwright Also selector syntax can be extended withm custom selector with a js module == Example module keyword for custom selector registerin == | async function registerMySelector(page, args, log, playwright) { | playwright.selectors.register("myselector", () => ({ | // Returns the first element matching given selector in the root's subtree. | query(root, selector) { | return root.querySelector(`a[data-title="${selector}"]`); | }, | | // Returns all elements matching given selector in the root's subtree. | queryAll(root, selector) { | return Array.from(root.querySelectorAll(`a[data-title="${selector}"]`)); | } | })); | return 1; | } | exports.__esModule = true; | exports.registerMySelector = registerMySelector; """ ROBOT_LIBRARY_VERSION = VERSION ROBOT_LISTENER_API_VERSION = 3 ROBOT_LIBRARY_LISTENER: "Browser" ROBOT_LIBRARY_SCOPE = "GLOBAL" _context_cache = ContextCache() _suite_cleanup_done = False run_on_failure_keyword: Optional[DelayedKeyword] = None def __init__( self, timeout: timedelta = timedelta(seconds=10), enable_playwright_debug: bool = False, auto_closing_level: AutoClosingLevel = AutoClosingLevel.TEST, retry_assertions_for: timedelta = timedelta(seconds=1), run_on_failure: str = "Take Screenshot", external_browser_executable: Optional[Dict[SupportedBrowsers, str]] = None, jsextension: Optional[str] = None, enable_presenter_mode: bool = False, ): """Browser library can be taken into use with optional arguments: - ``timeout`` <str> Timeout for keywords that operate on 
elements. The keywords will wait for this time for the element to appear into the page. Defaults to "10s" => 10 seconds. - ``enable_playwright_debug`` <bool> Enable low level debug information from the playwright tool. Mainly Useful for the library developers and for debugging purposes. - ``auto_closing_level`` < ``TEST`` | ``SUITE`` | ``MANUAL`` > Configure context and page automatic closing. Default is ``TEST``, for more details, see `AutoClosingLevel` - ``retry_assertions_for`` <str> Timeout for retrying assertions on keywords before failing the keywords. This timeout starts counting from the first failure. Global ``timeout`` will still be in effect. This allows stopping execution faster to assertion failure when element is found fast. - ``run_on_failure`` <str> Sets the keyword to execute in case of a failing Browser keyword. It can be the name of any keyword that does not have any mandatory argument. If no extra action should be done after a failure, set it to ``None`` or any other robot falsy value. - ``external_browser_executable`` <Dict <SupportedBrowsers, Path>> Dict mapping name of browser to path of executable of a browser. Will make opening new browsers of the given type use the set executablePath. Currently only configuring of `chromium` to a separate executable (chrome, chromium and Edge executables all work with recent versions) works. - ``jsextension`` <str> Path to Javascript module exposed as extra keywords. Module must be in CommonJS. - ``enable_presenter_mode`` <bool> Automatic highlights to interacted components, slowMo and a small pause at the end. 
""" self.timeout = self.convert_timeout(timeout) self.retry_assertions_for = self.convert_timeout(retry_assertions_for) self.ROBOT_LIBRARY_LISTENER = self self._execution_stack: List[dict] = [] self._running_on_failure_keyword = False self._pause_on_failure: Set["Browser"] = set() self.run_on_failure_keyword = ( None if is_falsy(run_on_failure) else {"name": run_on_failure, "args": ()} ) self.external_browser_executable: Dict[SupportedBrowsers, str] = ( external_browser_executable or {} ) self._unresolved_promises: Set[Future] = set() self._playwright_state = PlaywrightState(self) libraries = [ self._playwright_state, Control(self), Cookie(self), Devices(self), Evaluation(self), Interaction(self), Getters(self), Network(self), RunOnFailureKeywords(self), Promises(self), Waiter(self), WebAppState(self), ] self.playwright = Playwright(self, enable_playwright_debug) self._auto_closing_level = auto_closing_level self.current_arguments = () if jsextension is not None: libraries.append(self._initialize_jsextension(jsextension)) self.presenter_mode = enable_presenter_mode DynamicCore.__init__(self, libraries) def _initialize_jsextension(self, jsextension: str) -> LibraryComponent: component = LibraryComponent(self) with self.playwright.grpc_channel() as stub: response = stub.InitializeExtension( Request().FilePath(path=os.path.abspath(jsextension)) ) for name in response.keywords: setattr(component, name, self._jskeyword_call(name)) return component def _jskeyword_call(self, name: str): @keyword def func(*args): with self.playwright.grpc_channel() as stub: responses = stub.CallExtensionKeyword( Request().KeywordCall(name=name, arguments=args) ) for response in responses: logger.info(response.log) if response.json == "": return return json.loads(response.json) return func @property def outputdir(self) -> str: if EXECUTION_CONTEXTS.current: return BuiltIn().get_variable_value("${OUTPUTDIR}") else: return "." 
@property def browser_output(self) -> Path: return Path(self.outputdir, "browser") def _close(self): try: self.playwright.close() except ConnectionError as e: logger.trace(f"Browser library closing problem: {e}") def _start_suite(self, suite, result): if not self._suite_cleanup_done and self.browser_output.is_dir(): self._suite_cleanup_done = True logger.debug(f"Removing: {self.browser_output}") shutil.rmtree(str(self.browser_output), ignore_errors=True) if self._auto_closing_level != AutoClosingLevel.MANUAL: try: self._execution_stack.append(self.get_browser_catalog()) except ConnectionError as e: logger.debug(f"Browser._start_suite connection problem: {e}") def _start_test(self, test, result): if self._auto_closing_level == AutoClosingLevel.TEST: try: self._execution_stack.append(self.get_browser_catalog()) except ConnectionError as e: logger.debug(f"Browser._start_test connection problem: {e}") def _end_test(self, test: TestCaseRunning, result: TestCaseResult): if len(self._unresolved_promises) > 0: logger.warn(f"Waiting unresolved promises at the end of test '{test.name}'") self.wait_for_all_promises() if self._auto_closing_level == AutoClosingLevel.TEST: if self.presenter_mode: logger.debug("Presenter mode: Wait for 5 seconds before pruning pages") time.sleep(5.0) if len(self._execution_stack) == 0: logger.debug("Browser._end_test empty execution stack") return try: catalog_before_test = self._execution_stack.pop() self._prune_execution_stack(catalog_before_test) except AssertionError as e: logger.debug(f"Test Case: {test.name}, End Test: {e}") except ConnectionError as e: logger.debug(f"Browser._end_test connection problem: {e}") def _end_suite(self, suite, result): if self._auto_closing_level != AutoClosingLevel.MANUAL: if len(self._execution_stack) == 0: logger.debug("Browser._end_suite empty execution stack") return try: catalog_before_suite = self._execution_stack.pop() self._prune_execution_stack(catalog_before_suite) except AssertionError as e: 
logger.debug(f"Test Suite: {suite.name}, End Suite: {e}") except ConnectionError as e: logger.debug(f"Browser._end_suite connection problem: {e}") def _prune_execution_stack(self, catalog_before: dict) -> None: catalog_after = self.get_browser_catalog() ctx_before_ids = [c["id"] for b in catalog_before for c in b["contexts"]] ctx_after_ids = [c["id"] for b in catalog_after for c in b["contexts"]] new_ctx_ids = [c for c in ctx_after_ids if c not in ctx_before_ids] for ctx_id in new_ctx_ids: self._playwright_state.switch_context(ctx_id) self._playwright_state.close_context() pages_before = [ (p["id"], c["id"]) for b in catalog_before for c in b["contexts"] for p in c["pages"] ] pages_after = [ (p["id"], c["id"]) for b in catalog_after for c in b["contexts"] for p in c["pages"] if c["id"] not in new_ctx_ids ] new_page_ids = [p for p in pages_after if p not in pages_before] for page_id, ctx_id in new_page_ids: self._playwright_state.close_page(page_id, ctx_id) def run_keyword(self, name, args, kwargs=None): try: return DynamicCore.run_keyword(self, name, args, kwargs) except AssertionError as e: self.keyword_error() if self._pause_on_failure: sys.__stdout__.write(f"\n[ FAIL ] {e}") sys.__stdout__.write( "\n[Paused on failure] Press Enter to continue..\n" ) sys.__stdout__.flush() input() raise e def start_keyword(self, name, attrs): """Take screenshot of tests that have failed due to timeout. This method is part of the Listener API implemented by the library. This can be done with BuiltIn keyword `Run Keyword If Timeout Occurred`, but the problem there is that you have to remember to put it into your Suite/Test Teardown. Since taking screenshot is the most obvious thing to do on failure, let's do it automatically. This cannot be implemented as a `end_test` listener method, since at that time, the teardown has already been executed and browser may have been closed already. This implementation will take the screenshot before the teardown begins to execute. 
""" self.current_arguments = tuple(attrs["args"]) if attrs["type"] == "Teardown": timeout_pattern = "Test timeout .* exceeded." test = EXECUTION_CONTEXTS.current.test if ( test is not None and test.status == "FAIL" and re.match(timeout_pattern, test.message) ): self.screenshot_on_failure(test.name) def keyword_error(self): """Sends screenshot command to Playwright. Only works during testing since this uses robot's outputdir for output. """ if self._running_on_failure_keyword or not self.run_on_failure_keyword: return try: self._running_on_failure_keyword = True if is_same_keyword(self.run_on_failure_keyword["name"], "Take Screenshot"): args = self.run_on_failure_keyword["args"] path = args[0] if args else self._failure_screenshot_path() self.take_screenshot(path) else: BuiltIn().run_keyword( self.run_on_failure_keyword["name"], *self.run_on_failure_keyword["args"], ) except Exception as err: logger.warn( f"Keyword '{self.run_on_failure_keyword["name"]}' could not be run on failure:\n{err}" ) finally: self._running_on_failure_keyword = False def _failure_screenshot_path(self): valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits) test_name = BuiltIn().get_variable_value("${TEST NAME}", "GENERIC") return os.path.join( self.outputdir, "".join(c for c in test_name if c in valid_chars).replace(" ", "_") + "_FAILURE_SCREENSHOT_{index}", ) def get_timeout(self, timeout: Union[timedelta, None]) -> float: if timeout is None: return self.timeout return self.convert_timeout(timeout) def convert_timeout( self, timeout: Union[timedelta, float], to_ms: bool = True ) -> float: convert = 1000 if to_ms else 1 if isinstance(timeout, timedelta): return timeout.total_seconds() * convert return timestr_to_secs(timeout) * convert def millisecs_to_timestr(self, timeout: float) -> str: return secs_to_timestr(timeout / 1000) @overrides def get_keyword_documentation(self, name): doc = DynamicCore.get_keyword_documentation(self, name) if name == "__intro__": doc = 
doc.replace("%ASSERTION_TABLE%", AssertionOperator.__doc__) doc = doc.replace("%AUTO_CLOSING_LEVEL%", AutoClosingLevel.__doc__) return doc
# Copyright 2020- Robot Framework Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import re import shutil import string import sys import time from concurrent.futures._base import Future from datetime import timedelta from pathlib import Path from typing import Dict, List, Optional, Set, Union from assertionengine import AssertionOperator from overrides import overrides from robot.libraries.BuiltIn import EXECUTION_CONTEXTS, BuiltIn # type: ignore from robot.result.model import TestCase as TestCaseResult # type: ignore from robot.running.model import TestCase as TestCaseRunning # type: ignore from robot.utils import secs_to_timestr, timestr_to_secs # type: ignore from robotlibcore import DynamicCore # type: ignore from .base import ContextCache, LibraryComponent from .generated.playwright_pb2 import Request from .keywords import ( Control, Cookie, Devices, Evaluation, Getters, Interaction, Network, PlaywrightState, Promises, RunOnFailureKeywords, Waiter, WebAppState, ) from .playwright import Playwright from .utils import AutoClosingLevel, is_falsy, is_same_keyword, keyword, logger # Importing this directly from .utils break the stub type checks from .utils.data_types import DelayedKeyword, SupportedBrowsers from .version import __version__ as VERSION class Browser(DynamicCore): """Browser library is a browser automation library for Robot Framework. This is the keyword documentation for Browser library. 
For information about installation, support, and more please visit the [https://github.com/MarketSquare/robotframework-playwright|project pages]. For more information about Robot Framework itself, see [https://robotframework.org|robotframework.org]. Browser library uses [https://github.com/microsoft/playwright|Playwright Node module] to automate [https://www.chromium.org/Home|Chromium], [https://www.mozilla.org/en-US/firefox/new/|Firefox] and [https://webkit.org/|WebKit] with a single library. == Table of contents == %TOC% = Browser, Context and Page = Browser library works with three different layers that build on each other: *Browser*, *Context* and *Page*. == Browsers == A *browser* can be started with one of the three different engines Chromium, Firefox or Webkit. === Supported Browsers === | Browser | Browser with this engine | | ``chromium`` | Google Chrome, Microsoft Edge (since 2020), Opera | | ``firefox`` | Mozilla Firefox | | ``webkit`` | Apple Safari, Mail, AppStore on MacOS and iOS | Since [https://github.com/microsoft/playwright|Playwright] comes with a pack of builtin binaries for all browsers, no additional drivers e.g. geckodriver are needed. All these browsers that cover more than 85% of the world wide used browsers, can be tested on Windows, Linux and MacOS. Theres is not need for dedicated machines anymore. A browser process is started ``headless`` (without a GUI) by default. Run `New Browser` with specified arguments if a browser with a GUI is requested or if a proxy has to be configured. A browser process can contain several contexts. == Contexts == A *context* corresponds to set of independent incognito pages in a browser that share cookies, sessions or profile settings. Pages in two separate contexts do not share cookies, sessions or profile settings. Compared to Selenium, these do *not* require their own browser process. To get a clean environment a test can just open a new context. 
Due to this new independent browser sessions can be opened with Robot Framework Browser about 10 times faster than with Selenium by just opening a `New Context` within the opened browser. The context layer is useful e.g. for testing different users sessions on the same webpage without opening a whole new browser context. Contexts can also have detailed configurations, such as geo-location, language settings, the viewport size or color scheme. Contexts do also support http credentials to be set, so that basic authentication can also be tested. To be able to download files within the test, the ``acceptDownloads`` argument must be set to ``True`` in `New Context` keyword. A context can contain different pages. == Pages == A *page* does contain the content of the loaded web site and has a browsing history. Pages and browser tabs are the same. Typical usage could be: | *** Test Cases *** | Starting a browser with a page | New Browser chromium headless=false | New Context viewport={'width': 1920, 'height': 1080} | New Page https://marketsquare.github.io/robotframework-browser/Browser.html | Get Title == Browser The `Open Browser` keyword opens a new browser, a new context and a new page. This keyword is useful for quick experiments or debugging sessions. When a `New Page` is called without an open browser, `New Browser` and `New Context` are executed with default values first. Each Browser, Context and Page has a unique ID with which they can be addressed. A full catalog of what is open can be received by `Get Browser Catalog` as dictionary. = Finding elements = All keywords in the library that need to interact with an element on a web page take an argument typically named ``selector`` that specifies how to find the element. Selector strategies that are supported by default are listed in the table below. | = Strategy = | = Match based on = | = Example = | | ``css`` | CSS selector. | ``css=.class > #login_btn`` | | ``xpath`` | XPath expression. 
| ``xpath=//input[@id="login_btn"]`` | | ``text`` | Browser text engine. | ``text=Login`` | | ``id`` | Element ID Attribute. | ``id=login_btn`` | == Explicit Selector Strategy == The explicit selector strategy is specified with a prefix using syntax ``strategy=value``. Spaces around the separator are ignored, so ``css=foo``, ``css= foo`` and ``css = foo`` are all equivalent. == Implicit Selector Strategy == *The default selector strategy is `css`.* If selector does not contain one of the know explicit selector strategies, it is assumed to contain css selector. Selectors that are starting with ``//`` or ``..`` are considered as xpath selectors. Selectors that are in quotes are considered as text selectors. Examples: | # CSS selectors are default. | `Click` span > button.some_class # This is equivalent | `Click` css=span > button.some_class # to this. | | # // or .. leads to xpath selector strategy | `Click` //span/button[@class="some_class"] | `Click` xpath=//span/button[@class="some_class"] | | # "text" in quotes leads to exact text selector strategy | `Click` "Login" | `Click` text="Login" == CSS == As written before, the default selector strategy is `css`. See [https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors | css selector] for more information. Any malformed selector not starting with ``//`` or ``..`` nor starting and ending with a quote is assumed to be a css selector. Example: | `Click` span > button.some_class == XPath == XPath engine is equivalent to [https://developer.mozilla.org/en/docs/Web/API/Document/evaluate|Document.evaluate]. Example: ``xpath=//html/body//span[text()="Hello World"]``. Malformed selector starting with ``//`` or ``..`` is assumed to be an xpath selector. For example, ``//html/body`` is converted to ``xpath=//html/body``. More examples are displayed in `Examples`. Note that xpath does not pierce [https://developer.mozilla.org/en-US/docs/Web/Web_Components/Using_shadow_DOM|shadow_roots]. 
== Text == Text engine finds an element that contains a text node with the passed text. For example, ``Click text=Login`` clicks on a login button, and ``Wait For Elements State text="lazy loaded text"`` waits for the "lazy loaded text" to appear in the page. Text engine finds fields based on their labels in text inserting keywords. Malformed selector starting and ending with a quote (either ``"`` or ``'``) is assumed to be a text selector. For example, ``Click "Login"`` is converted to ``Click text="Login"``. Be aware that these leads to exact matches only! More examples are displayed in `Examples`. === Insensitive match === By default, the match is case-insensitive, ignores leading/trailing whitespace and searches for a substring. This means ``text= Login`` matches ``<button>Button loGIN (click me)</button>``. === Exact match === Text body can be escaped with single or double quotes for precise matching, insisting on exact match, including specified whitespace and case. This means ``text="Login "`` will only match ``<button>Login </button>`` with exactly one space after "Login". Quoted text follows the usual escaping rules, e.g. use ``\\"`` to escape double quote in a double-quoted string: ``text="foo\\"bar"``. === RegEx === Text body can also be a JavaScript-like regex wrapped in / symbols. This means ``text=/^hello .*!$/i`` or ``text=/^Hello .*!$/`` will match ``<span>Hello Peter Parker!</span>`` with any name after ``Hello``, ending with ``!``. The first one flagged with ``i`` for case-insensitive. See [https://regex101.com/|https://regex101.com] for more information about RegEx. === Button and Submit Values === Input elements of the type button and submit are rendered with their value as text, and text engine finds them. For example, ``text=Login`` matches ``<input type=button value="Login">``. == Cascaded selector syntax == Browser library supports the same selector strategies as the underlying Playwright node module: xpath, css, id and text. 
The strategy can either be explicitly specified with a prefix or the strategy can be implicit. A major advantage of Browser is, that multiple selector engines can be used within one selector. It is possible to mix XPath, CSS and Text selectors while selecting a single element. Selectors are strings that consists of one or more clauses separated by ``>>`` token, e.g. ``clause1 >> clause2 >> clause3``. When multiple clauses are present, next one is queried relative to the previous one's result. Browser library supports concatination of different selectors seperated by ``>>``. For example: | `Highlight Elements` "Hello" >> ../.. >> .select_button | `Highlight Elements` text=Hello >> xpath=../.. >> css=.select_button Each clause contains a selector engine name and selector body, e.g. ``engine=body``. Here ``engine`` is one of the supported engines (e.g. css or a custom one). Selector ``body`` follows the format of the particular engine, e.g. for css engine it should be a [https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors | css selector]. Body format is assumed to ignore leading and trailing white spaces, so that extra whitespace can be added for readability. If selector engine needs to include ``>>`` in the body, it should be escaped inside a string to not be confused with clause separator, e.g. ``text="some >> text"``. Selector engine name can be prefixed with ``*`` to capture element that matches the particular clause instead of the last one. For example, ``css=article >> text=Hello`` captures the element with the text ``Hello``, and ``*css=article >> text=Hello`` (note the *) captures the article element that contains some element with the text Hello. For convenience, selectors in the wrong format are heuristically converted to the right format. 
See `Implicit Selector Strategy` == Examples == | # queries 'div' css selector | Get Element css=div | | # queries '//html/body/div' xpath selector | Get Element //html/body/div | | # queries '"foo"' text selector | Get Element text=foo | | # queries 'span' css selector inside the result of '//html/body/div' xpath selector | Get Element xpath=//html/body/div >> css=span | | # converted to 'css=div' | Get Element div | | # converted to 'xpath=//html/body/div' | Get Element //html/body/div | | # converted to 'text="foo"' | Get Element "foo" | | # queries the div element of every 2nd span element inside an element with the id foo | Get Element \\#foo >> css=span:nth-child(2n+1) >> div | Get Element id=foo >> css=span:nth-child(2n+1) >> div Be aware that using ``#`` as a starting character in Robot Framework would be interpreted as comment. Due to that fact a ``#id`` must be escaped as ``\\#id``. == Frames == By default, selector chains do not cross frame boundaries. It means that a simple CSS selector is not able to select and element located inside an iframe or a frameset. For this usecase, there is a special selector ``>>>`` which can be used to combine a selector for the frame and a selector for an element inside a frame. Given this simple pseudo html snippet: | <iframe id="iframe" src="src.html"> | #document | <!DOCTYPE html> | <html> | <head></head> | <body> | <button id="btn">Click Me</button> | </body> | </html> | </iframe> Here's a keyword call that clicks the button inside the frame. | Click id=iframe >>> id=btn The selectors on the left and right side of ``>>>`` can be any valid selectors. The selector clause directly before the frame opener ``>>>`` must select the frame element. == WebComponents and Shadow DOM == Playwright and so also Browser are able to do automatic piercing of Shadow DOMs and therefore are the best automation technology when working with WebComponents. 
Also other technologies claim that they can handle [https://developer.mozilla.org/en-US/docs/Web/Web_Components/Using_shadow_DOM|Shadow DOM and Web Components]. However, non of them do pierce shadow roots automatically, which may be inconvenient when working with Shadow DOM and Web Components. For that reason, css engine pierces shadow roots. More specifically, every [https://developer.mozilla.org/en-US/docs/Web/CSS/Descendant_combinator|Descendant combinator] pierces an arbitrary number of open shadow roots, including the implicit descendant combinator at the start of the selector. That means, it is not nessesary to select each shadow host, open its shadow root and select the next shadow host until you reach the element that should be controlled. === CSS:light === ``css:light`` engine is equivalent to [https://developer.mozilla.org/en/docs/Web/API/Document/querySelector | Document.querySelector] and behaves according to the CSS spec. However, it does not pierce shadow roots. ``css`` engine first searches for elements in the light dom in the iteration order, and then recursively inside open shadow roots in the iteration order. It does not search inside closed shadow roots or iframes. Examples: | <article> | <div>In the light dom</div> | <div slot='myslot'>In the light dom, but goes into the shadow slot</div> | <open mode shadow root> | <div class='in-the-shadow'> | <span class='content'> | In the shadow dom | <open mode shadow root> | <li id='target'>Deep in the shadow</li> | </open mode shadow root> | </span> | </div> | <slot name='myslot'></slot> | </open mode shadow root> | </article> Note that ``<open mode shadow root>`` is not an html element, but rather a shadow root created with ``element.attachShadow({mode: 'open'})``. - Both ``"css=article div"`` and ``"css:light=article div"`` match the first ``<div>In the light dom</div>``. 
- Both ``"css=article > div"`` and ``"css:light=article > div"`` match two ``div`` elements that are direct children of the ``article``. - ``"css=article .in-the-shadow"`` matches the ``<div class='in-the-shadow'>``, piercing the shadow root, while ``"css:light=article .in-the-shadow"`` does not match anything. - ``"css:light=article div > span"`` does not match anything, because both light-dom ``div`` elements do not contain a ``span``. - ``"css=article div > span"`` matches the ``<span class='content'>``, piercing the shadow root. - ``"css=article > .in-the-shadow"`` does not match anything, because ``<div class='in-the-shadow'>`` is not a direct child of ``article`` - ``"css:light=article > .in-the-shadow"`` does not match anything. - ``"css=article li#target"`` matches the ``<li id='target'>Deep in the shadow</li>``, piercing two shadow roots. === text:light === ``text`` engine open pierces shadow roots similarly to ``css``, while ``text:light`` does not. Text engine first searches for elements in the light dom in the iteration order, and then recursively inside open shadow roots in the iteration order. It does not search inside closed shadow roots or iframes. === id, data-testid, data-test-id, data-test and their :light counterparts === Attribute engines are selecting based on the corresponding attribute value. For example: ``data-test-id=foo`` is equivalent to ``css=[data-test-id="foo"]``, and ``id:light=foo`` is equivalent to ``css:light=[id="foo"]``. == Element reference syntax == It is possible to get a reference to an element by using `Get Element` keyword. This reference can be used as a *first* part of a selector by using a special selector syntax `element=` like this: | ${ref}= Get Element .some_class | Click element=${ref} >> .some_child The `.some_child` selector in the example is relative to the element referenced by ${ref}. 
= Assertions = Keywords that accept arguments ``assertion_operator`` <`AssertionOperator`> and ``assertion_expected`` can optionally assert. %ASSERTION_TABLE% But default the keywords will provide an error message if the assertion fails, but default error message can be overwritten with a ``message`` argument. The ``message`` argument accepts `{value}`, `{value_type}`, `{expected}` and `{expected_type}` [https://docs.python.org/3/library/stdtypes.html#str.format|format] options. The `{value}` is the value returned by the keyword and the `{expected}` is the expected value defined by the user, usually value in the ``assertion_expected`` argument. The `{value_type}` and `{expected_type}` are the type definitions from `{value}` and `{expected}` arguments. In similar fashion as Python [https://docs.python.org/3/library/functions.html#type|type] returns type definition. Assertions will retry until ``timeout`` has expired if they do not pass. The assertion ``assertion_expected`` value is not converted by the library and is used as is. Therefore when assertion is made, the ``assertion_expected`` argument value and value returned the keyword must have same type. If types are not same, assertion will fail. Example `Get Text` always returns a string and has to be compared with a string, even the returnd value might look like a number. Other Keywords have other specific types they return. `Get Element Count` always returns an integer. `Get Bounding Box` and `Get Viewport Size` can be filtered. They return a dictionary without filter and a number when filtered. These Keywords do autoconvert the expected value if a number is returned. * < less or greater > With Strings* Compairisons of strings with ``greater than`` or ``less than`` compares each character, starting from 0 reagarding where it stands in the code page. Example: ``A < Z``, ``Z < a``, ``ac < dc` It does never compare the length of elements. Neither lists nor strings. 
The comparison stops at the first character that is different. Examples: ``'abcde' < 'abd'``, ``'100.000' < '2'``

In Python 3, and therefore also in Browser, it is not possible to compare numbers with strings with a greater or less operator. On keywords that return numbers, the given expected value is automatically converted to a number before comparison.

The getters `Get Page State` and `Get Browser Catalog` return a dictionary. Values of the dictionary can be directly asserted. Pay attention to possible types because they are evaluated in Python. For example:

| Get Page State    validate    2020 >= value['year']                     # Comparison of numbers
| Get Page State    validate    "IMPORTANT MESSAGE!" == value['message']  # Comparison of strings

== The 'then' or 'evaluate' closure ==

Keywords that accept arguments ``assertion_operator`` and ``assertion_expected`` can optionally also use ``then`` or ``evaluate`` closure to modify the returned value with BuiltIn Evaluate. The actual value can be accessed with ``value``. For example ``Get Title  then  'TITLE: '+value``. See [https://robotframework.org/robotframework/latest/libraries/BuiltIn.html#Evaluating%20expressions| Builtin Evaluating expressions] for more info on the syntax.

== Examples ==

| # *Keyword*    *Selector*                *Key*       *Assertion Operator*    *Assertion Expected*
| Get Title                                            equal                   Page Title
| Get Title                                            ^=                      Page
| Get Style      //*[@id="div-element"]    width       >                       100
| Get Title                                            matches                 \\\\w+\\\\s\\\\w+
| Get Title                                            validate                value == "Login Page"
| Get Title                                            evaluate                value if value == "some value" else "something else"

= Automatic page and context closing =

%AUTO_CLOSING_LEVEL%

= Experimental: Re-using same node process =

Browser library integrates nodejs and python. The NodeJS side can also be executed as a standalone process. Browser libraries running on the same machine can talk to that instead of starting new node processes. This can speed up execution when running tests in parallel.
To start the node side, run in the directory where the Browser package is: ``PLAYWRIGHT_BROWSERS_PATH=0 node Browser/wrapper/index.js PORT``. ``PORT`` is the port you want to use for the node process. To then execute tests with pabot, for example, do ``ROBOT_FRAMEWORK_BROWSER_NODE_PORT=PORT pabot ..``.

= Extending Browser library with a JavaScript module =

Browser library can be extended with JavaScript. The module must be in CommonJS format that Node.js uses. You can translate your ES6 module to Node.js CommonJS style with Babel. Many other languages can also be translated to modules that can be used from Node.js. For example TypeScript, PureScript and ClojureScript, just to mention a few.

| async function myGoToKeyword(page, args, logger, playwright) {
|   logger(args.toString())
|   playwright.coolNewFeature()
|   return await page.goto(args[0]);
| }

``page``: [https://playwright.dev/docs/api/class-page|the playwright Page object].

``args``: list of strings from the Robot Framework keyword call.

!! A BIT UNSTABLE AND SUBJECT TO API CHANGES !!

``logger``: callback function that takes strings as arguments and writes them to the robot log. Can be called multiple times.

``playwright``: playwright module (* from 'playwright'). Useful for integrating with Playwright features that Browser library doesn't support with its own keywords.
    [https://playwright.dev/docs/api/class-playwright| API docs]

    == Example module.js ==

    | async function myGoToKeyword(page, args) {
    |   await page.goto(args[0]);
    |   return await page.title();
    | }
    | exports.__esModule = true;
    | exports.myGoToKeyword = myGoToKeyword;

    == Example Robot Framework side ==

    | *** Settings ***
    | Library   Browser  jsextension=${CURDIR}/module.js
    |
    | *** Test Cases ***
    | Hello
    |   New Page
    |   ${title}=  myGoToKeyword  https://playwright.dev
    |   Should be equal  ${title}  Playwright

    Also the selector syntax can be extended with a custom selector using a js module

    == Example module keyword for custom selector registering ==

    | async function registerMySelector(page, args, log, playwright) {
    | playwright.selectors.register("myselector", () => ({
    |    // Returns the first element matching given selector in the root's subtree.
    |    query(root, selector) {
    |      return root.querySelector(`a[data-title="${selector}"]`);
    |    },
    |
    |    // Returns all elements matching given selector in the root's subtree.
    |    queryAll(root, selector) {
    |      return Array.from(root.querySelectorAll(`a[data-title="${selector}"]`));
    |    }
    | }));
    | return 1;
    | }
    | exports.__esModule = true;
    | exports.registerMySelector = registerMySelector;
    """

    ROBOT_LIBRARY_VERSION = VERSION
    # Listener API v3: listener hooks receive running/result model objects.
    ROBOT_LISTENER_API_VERSION = 3
    ROBOT_LIBRARY_LISTENER: "Browser"
    ROBOT_LIBRARY_SCOPE = "GLOBAL"
    # Shared across instances: cached playwright contexts and the one-shot
    # output-directory cleanup flag used by _start_suite.
    _context_cache = ContextCache()
    _suite_cleanup_done = False
    # NOTE(review): annotated as Optional[DelayedKeyword] but __init__ assigns a
    # plain dict {"name": ..., "args": ()} -- confirm which type is intended.
    run_on_failure_keyword: Optional[DelayedKeyword] = None

    def __init__(
        self,
        timeout: timedelta = timedelta(seconds=10),
        enable_playwright_debug: bool = False,
        auto_closing_level: AutoClosingLevel = AutoClosingLevel.TEST,
        retry_assertions_for: timedelta = timedelta(seconds=1),
        run_on_failure: str = "Take Screenshot",
        external_browser_executable: Optional[Dict[SupportedBrowsers, str]] = None,
        jsextension: Optional[str] = None,
        enable_presenter_mode: bool = False,
    ):
        """Browser library can be taken into use with optional arguments:

        - ``timeout`` <str>
          Timeout for keywords that operate on elements. The keywords will wait
          for this time for the element to appear into the page. Defaults to
          "10s" => 10 seconds.
        - ``enable_playwright_debug`` <bool>
          Enable low level debug information from the playwright tool. Mainly
          useful for the library developers and for debugging purposes.
        - ``auto_closing_level`` < ``TEST`` | ``SUITE`` | ``MANUAL`` >
          Configure context and page automatic closing. Default is ``TEST``,
          for more details, see `AutoClosingLevel`
        - ``retry_assertions_for`` <str>
          Timeout for retrying assertions on keywords before failing the
          keywords. This timeout starts counting from the first failure.
          Global ``timeout`` will still be in effect. This allows stopping
          execution faster to assertion failure when element is found fast.
        - ``run_on_failure`` <str>
          Sets the keyword to execute in case of a failing Browser keyword.
          It can be the name of any keyword that does not have any mandatory
          argument. If no extra action should be done after a failure, set it
          to ``None`` or any other robot falsy value.
        - ``external_browser_executable`` <Dict <SupportedBrowsers, Path>>
          Dict mapping name of browser to path of executable of a browser.
          Will make opening new browsers of the given type use the set
          executablePath. Currently only configuring of `chromium` to a
          separate executable (chrome, chromium and Edge executables all work
          with recent versions) works.
        - ``jsextension`` <str>
          Path to Javascript module exposed as extra keywords. Module must be
          in CommonJS.
        - ``enable_presenter_mode`` <bool>
          Automatic highlights to interacted components, slowMo and a small
          pause at the end.
        """
        # Normalize user supplied timeouts (timedelta or Robot time string) to ms.
        self.timeout = self.convert_timeout(timeout)
        self.retry_assertions_for = self.convert_timeout(retry_assertions_for)
        # Register this very library instance as its own Robot Framework listener.
        self.ROBOT_LIBRARY_LISTENER = self
        # Stack of browser-catalog snapshots consumed by the auto-closing logic.
        self._execution_stack: List[dict] = []
        self._running_on_failure_keyword = False
        self._pause_on_failure: Set["Browser"] = set()
        self.run_on_failure_keyword = (
            None if is_falsy(run_on_failure) else {"name": run_on_failure, "args": ()}
        )
        self.external_browser_executable: Dict[SupportedBrowsers, str] = (
            external_browser_executable or {}
        )
        self._unresolved_promises: Set[Future] = set()
        self._playwright_state = PlaywrightState(self)
        # Keyword components merged into this library via DynamicCore.
        libraries = [
            self._playwright_state,
            Control(self),
            Cookie(self),
            Devices(self),
            Evaluation(self),
            Interaction(self),
            Getters(self),
            Network(self),
            RunOnFailureKeywords(self),
            Promises(self),
            Waiter(self),
            WebAppState(self),
        ]
        self.playwright = Playwright(self, enable_playwright_debug)
        self._auto_closing_level = auto_closing_level
        self.current_arguments = ()
        if jsextension is not None:
            # Extra keywords loaded from the user supplied CommonJS module.
            libraries.append(self._initialize_jsextension(jsextension))
        self.presenter_mode = enable_presenter_mode
        DynamicCore.__init__(self, libraries)

    def _initialize_jsextension(self, jsextension: str) -> LibraryComponent:
        """Load a CommonJS module on the node side and expose each exported
        function as a dynamically generated keyword on a new component."""
        component = LibraryComponent(self)
        with self.playwright.grpc_channel() as stub:
            response = stub.InitializeExtension(
                Request().FilePath(path=os.path.abspath(jsextension))
            )
            for name in response.keywords:
                setattr(component, name, self._jskeyword_call(name))
        return component

    def _jskeyword_call(self, name: str):
        """Build a keyword function that forwards the call to the JS extension
        over gRPC, logs streamed responses, and returns the JSON-decoded result
        of the final response (or None when it is empty)."""

        @keyword
        def func(*args):
            with self.playwright.grpc_channel() as stub:
                responses = stub.CallExtensionKeyword(
                    Request().KeywordCall(name=name, arguments=args)
                )
                for response in responses:
                    logger.info(response.log)
                if response.json == "":
                    return
                return json.loads(response.json)

        return func

    @property
    def outputdir(self) -> str:
        # Robot's ${OUTPUTDIR} while tests execute; cwd otherwise (e.g. libdoc).
        if EXECUTION_CONTEXTS.current:
            return BuiltIn().get_variable_value("${OUTPUTDIR}")
        else:
            return "."
    @property
    def browser_output(self) -> Path:
        """Directory under Robot's output dir where browser artifacts go."""
        return Path(self.outputdir, "browser")

    def _close(self):
        # Shut down the node-side playwright process; tolerate a dead channel.
        try:
            self.playwright.close()
        except ConnectionError as e:
            logger.trace(f"Browser library closing problem: {e}")

    def _start_suite(self, suite, result):
        """Listener hook: one-time output cleanup + catalog snapshot for auto-closing."""
        if not self._suite_cleanup_done and self.browser_output.is_dir():
            # Remove artifacts from a previous run exactly once per execution.
            self._suite_cleanup_done = True
            logger.debug(f"Removing: {self.browser_output}")
            shutil.rmtree(str(self.browser_output), ignore_errors=True)
        if self._auto_closing_level != AutoClosingLevel.MANUAL:
            try:
                self._execution_stack.append(self.get_browser_catalog())
            except ConnectionError as e:
                logger.debug(f"Browser._start_suite connection problem: {e}")

    def _start_test(self, test, result):
        # Snapshot the catalog so _end_test can close what this test opened.
        if self._auto_closing_level == AutoClosingLevel.TEST:
            try:
                self._execution_stack.append(self.get_browser_catalog())
            except ConnectionError as e:
                logger.debug(f"Browser._start_test connection problem: {e}")

    def _end_test(self, test: TestCaseRunning, result: TestCaseResult):
        # Resolve leftover promises, then close contexts/pages the test opened.
        if len(self._unresolved_promises) > 0:
            logger.warn(f"Waiting unresolved promises at the end of test '{test.name}'")
            self.wait_for_all_promises()
        if self._auto_closing_level == AutoClosingLevel.TEST:
            if self.presenter_mode:
                logger.debug("Presenter mode: Wait for 5 seconds before pruning pages")
                time.sleep(5.0)
            if len(self._execution_stack) == 0:
                logger.debug("Browser._end_test empty execution stack")
                return
            try:
                catalog_before_test = self._execution_stack.pop()
                self._prune_execution_stack(catalog_before_test)
            except AssertionError as e:
                logger.debug(f"Test Case: {test.name}, End Test: {e}")
            except ConnectionError as e:
                logger.debug(f"Browser._end_test connection problem: {e}")

    def _end_suite(self, suite, result):
        # Close contexts/pages opened during the suite (unless MANUAL closing).
        if self._auto_closing_level != AutoClosingLevel.MANUAL:
            if len(self._execution_stack) == 0:
                logger.debug("Browser._end_suite empty execution stack")
                return
            try:
                catalog_before_suite = self._execution_stack.pop()
                self._prune_execution_stack(catalog_before_suite)
            except AssertionError as e:
                logger.debug(f"Test Suite: {suite.name}, End Suite: {e}")
            except ConnectionError as e:
                logger.debug(f"Browser._end_suite connection problem: {e}")

    def _prune_execution_stack(self, catalog_before: dict) -> None:
        """Close every context and page that exists now but not in `catalog_before`."""
        catalog_after = self.get_browser_catalog()
        ctx_before_ids = [c["id"] for b in catalog_before for c in b["contexts"]]
        ctx_after_ids = [c["id"] for b in catalog_after for c in b["contexts"]]
        new_ctx_ids = [c for c in ctx_after_ids if c not in ctx_before_ids]
        for ctx_id in new_ctx_ids:
            self._playwright_state.switch_context(ctx_id)
            self._playwright_state.close_context()
        pages_before = [
            (p["id"], c["id"])
            for b in catalog_before
            for c in b["contexts"]
            for p in c["pages"]
        ]
        # Pages inside freshly closed contexts are already gone; skip them.
        pages_after = [
            (p["id"], c["id"])
            for b in catalog_after
            for c in b["contexts"]
            for p in c["pages"]
            if c["id"] not in new_ctx_ids
        ]
        new_page_ids = [p for p in pages_after if p not in pages_before]
        for page_id, ctx_id in new_page_ids:
            self._playwright_state.close_page(page_id, ctx_id)

    def run_keyword(self, name, args, kwargs=None):
        # Wrap DynamicCore dispatch: on failure run the configured on-failure
        # keyword and optionally pause for interactive debugging, then re-raise.
        try:
            return DynamicCore.run_keyword(self, name, args, kwargs)
        except AssertionError as e:
            self.keyword_error()
            if self._pause_on_failure:
                sys.__stdout__.write(f"\n[ FAIL ] {e}")
                sys.__stdout__.write(
                    "\n[Paused on failure] Press Enter to continue..\n"
                )
                sys.__stdout__.flush()
                input()
            raise e

    def start_keyword(self, name, attrs):
        """Take screenshot of tests that have failed due to timeout.

        This method is part of the Listener API implemented by the library.
        This can be done with BuiltIn keyword `Run Keyword If Timeout
        Occurred`, but the problem there is that you have to remember to
        put it into your Suite/Test Teardown. Since taking screenshot is
        the most obvious thing to do on failure, let's do it automatically.

        This cannot be implemented as a `end_test` listener method, since at
        that time, the teardown has already been executed and browser may have
        been closed already. This implementation will take the screenshot
        before the teardown begins to execute.
        """
        self.current_arguments = tuple(attrs["args"])
        if attrs["type"] == "Teardown":
            timeout_pattern = "Test timeout .* exceeded."
            test = EXECUTION_CONTEXTS.current.test
            if (
                test is not None
                and test.status == "FAIL"
                and re.match(timeout_pattern, test.message)
            ):
                self.screenshot_on_failure(test.name)

    def keyword_error(self):
        """Sends screenshot command to Playwright.

        Only works during testing since this uses robot's outputdir for output.
        """
        if self._running_on_failure_keyword or not self.run_on_failure_keyword:
            return
        try:
            # Guard against recursion if the on-failure keyword itself fails.
            self._running_on_failure_keyword = True
            if is_same_keyword(self.run_on_failure_keyword["name"], "Take Screenshot"):
                args = self.run_on_failure_keyword["args"]
                path = args[0] if args else self._failure_screenshot_path()
                self.take_screenshot(path)
            else:
                BuiltIn().run_keyword(
                    self.run_on_failure_keyword["name"],
                    *self.run_on_failure_keyword["args"],
                )
        except Exception as err:
            logger.warn(
                f"Keyword '{self.run_on_failure_keyword['name']}' could not be run on failure:\n{err}"
            )
        finally:
            self._running_on_failure_keyword = False

    def _failure_screenshot_path(self):
        # Sanitize the test name into a filesystem-safe screenshot path template.
        valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
        test_name = BuiltIn().get_variable_value("${TEST NAME}", "GENERIC")
        return os.path.join(
            self.outputdir,
            "".join(c for c in test_name if c in valid_chars).replace(" ", "_")
            + "_FAILURE_SCREENSHOT_{index}",
        )

    def get_timeout(self, timeout: Union[timedelta, None]) -> float:
        """Return `timeout` in milliseconds, or the library default when None."""
        if timeout is None:
            return self.timeout
        return self.convert_timeout(timeout)

    def convert_timeout(
        self, timeout: Union[timedelta, float], to_ms: bool = True
    ) -> float:
        """Convert a timedelta or a Robot time string to milliseconds (seconds
        when `to_ms` is False)."""
        convert = 1000 if to_ms else 1
        if isinstance(timeout, timedelta):
            return timeout.total_seconds() * convert
        return timestr_to_secs(timeout) * convert

    def millisecs_to_timestr(self, timeout: float) -> str:
        """Format a millisecond value as a Robot Framework time string."""
        return secs_to_timestr(timeout / 1000)

    @overrides
    def get_keyword_documentation(self, name):
        # Inject the assertion-operator table and auto-closing docs into the intro.
        doc = DynamicCore.get_keyword_documentation(self, name)
        if name == "__intro__":
            doc = doc.replace("%ASSERTION_TABLE%", AssertionOperator.__doc__)
            doc = doc.replace("%AUTO_CLOSING_LEVEL%", AutoClosingLevel.__doc__)
        return doc
#!/usr/bin/env python3 import templates import cgi import secret import os # import json # Print environment as json # print("Content-Type: application/json") # print() # print(json.dumps(dict(os.environ), indent=2)) # Print query parameter data in html # print("Content-Type: text/html") # print() # print(f"<p>QUERY_STRING={os.environ.get("QUERY_STRING")}</p>") # Print browser data in html # print("Content-Type: text/html") # print() # print(f"<p>HTTP_USER_AGENT={os.environ.get("HTTP_USER_AGENT")}</p>") USER_COOKIE = "user=" + secret.username PASS_COOKIE = "pass=" + secret.password cookie_string = os.environ.get('HTTP_COOKIE') if (cookie_string and USER_COOKIE in cookie_string and PASS_COOKIE in cookie_string): print(templates.secret_page(secret.username, secret.password)) else: fieldStorage = cgi.FieldStorage() if (fieldStorage and fieldStorage['username'] and fieldStorage['password']): username = fieldStorage['username'].value password = fieldStorage['password'].value print('Set-Cookie: user=' + username) print('Set-Cookie: pass=' + password) if (username == secret.username and password == secret.password): isLoggedIn = True else: print(templates.after_login_incorrect()) else: print(templates.login_page()) print()
#!/usr/bin/env python3 import templates import cgi import secret import os # import json # Print environment as json # print("Content-Type: application/json") # print() # print(json.dumps(dict(os.environ), indent=2)) # Print query parameter data in html # print("Content-Type: text/html") # print() # print(f"<p>QUERY_STRING={os.environ.get('QUERY_STRING')}</p>") # Print browser data in html # print("Content-Type: text/html") # print() # print(f"<p>HTTP_USER_AGENT={os.environ.get('HTTP_USER_AGENT')}</p>") USER_COOKIE = "user=" + secret.username PASS_COOKIE = "pass=" + secret.password cookie_string = os.environ.get('HTTP_COOKIE') if (cookie_string and USER_COOKIE in cookie_string and PASS_COOKIE in cookie_string): print(templates.secret_page(secret.username, secret.password)) else: fieldStorage = cgi.FieldStorage() if (fieldStorage and fieldStorage['username'] and fieldStorage['password']): username = fieldStorage['username'].value password = fieldStorage['password'].value print('Set-Cookie: user=' + username) print('Set-Cookie: pass=' + password) if (username == secret.username and password == secret.password): isLoggedIn = True else: print(templates.after_login_incorrect()) else: print(templates.login_page()) print()
from threading import Thread, Event, Lock
from typing import Union

from mongoengine.errors import NotUniqueError
from web3.datastructures import AttributeDict

from src.contracts.ethereum.event_listener import EthEventListener
from src.contracts.ethereum.multisig_wallet import MultisigWallet
from src.contracts.secret.secret_contract import mint_json
from src.db.collections.commands import Commands
from src.db.collections.eth_swap import Swap, Status
from src.db.collections.signatures import Signatures
from src.db.collections.swaptrackerobject import SwapTrackerObject
from src.signer.secret20.signer import SecretAccount
from src.util.coins import CoinHandler
from src.util.common import Token
from src.util.config import Config
from src.util.logger import get_logger
from src.util.secretcli import create_unsigned_tx, account_info
from src.util.web3 import w3


class SecretManager(Thread):
    """Registers to contract event and manages tx state in DB"""

    def __init__(
        self,
        contract: MultisigWallet,
        s20_multisig_account: SecretAccount,
        config: Config,
        **kwargs
    ):
        self.contract = contract
        self._coins = CoinHandler()
        self.config = config
        self.multisig = s20_multisig_account
        # Listens for Swap/SwapToken events emitted by the multisig contract.
        self.event_listener = EthEventListener(contract, config)

        self.logger = get_logger(
            db_name=config.db_name,
            loglevel=config.log_level,
            logger_name=config.logger_name or f"{self.__class__.__name__}-{self.multisig.name}"
        )
        self.stop_signal = Event()

        self.account_num = 0
        self.sequence_lock = Lock()
        self.sequence = 0
        # Fetch account number and current sequence from chain (network call).
        self.update_sequence()

        self.event_listener.register(self._handle, contract.tracked_event(),)
        super().__init__(group=None, name="SecretManager", target=self.run, **kwargs)

    def running(self):
        # Healthy only while both this thread and the event listener are alive.
        return self.is_alive() and self.event_listener.is_alive()

    @property
    def _sequence(self):
        return self.sequence

    @_sequence.setter
    def _sequence(self, val):
        # NOTE(review): only this setter takes the lock; the rest of the class
        # mutates `self.sequence` directly, bypassing it -- confirm intent.
        with self.sequence_lock:
            self.sequence = val

    def stop(self):
        self.logger.info("Stopping..")
        self.event_listener.stop()
        self.stop_signal.set()

    def run(self):
        """Scans for signed transactions and updates status if multisig threshold achieved"""
        self.logger.info("Starting..")

        # Replay missed events up to the newest block considered final.
        to_block = w3.eth.blockNumber - self.config.eth_confirmations
        self.catch_up(to_block)
        self.event_listener.start()

        self.logger.info("Done catching up")

        while not self.stop_signal.is_set():
            # Requeue transactions flagged for retry before re-processing.
            for transaction in Swap.objects(status=Status.SWAP_RETRY, src_network="Ethereum"):
                self._retry(transaction)

            for transaction in Swap.objects(status=Status.SWAP_UNSIGNED):
                self.handle_unsigned_tx(transaction)

            for transaction in Commands.objects(status=Status.SWAP_UNSIGNED):
                self.handle_unsigned_tx(transaction)

            self.stop_signal.wait(self.config.sleep_interval)

    def handle_unsigned_tx(self, transaction: Union[Commands, Swap]):
        # Promote to SIGNED once enough signers have stored their signatures.
        self.logger.debug(f"Checking unsigned tx {transaction.id}")
        if Signatures.objects(tx_id=transaction.id).count() >= self.config.signatures_threshold:
            self.logger.info(f"Found tx {transaction.id} with enough signatures to broadcast")
            transaction.status = Status.SWAP_SIGNED
            transaction.save()
            self.logger.info(f"Set status of tx {transaction.id} to signed")
        else:
            self.logger.debug(f"Tx {transaction.id} does not have enough signatures")

    def catch_up(self, to_block: int):
        """Re-process contract events from the last handled block up to `to_block`."""
        from_block = SwapTrackerObject.last_processed('Ethereum') + 1
        self.logger.debug(f'Starting to catch up from block {from_block}')
        if self.config.eth_start_block > from_block:
            self.logger.debug(f'Due to config fast forwarding to block {self.config.eth_start_block}')
            from_block = self.config.eth_start_block
            SwapTrackerObject.update_last_processed('Ethereum', from_block)

        if to_block <= 0 or to_block < from_block:
            return

        self.logger.debug(f'Catching up to current block: {to_block}')

        evt_filter = self.contract.contract.events.Swap.createFilter(fromBlock=from_block, toBlock=to_block)
        for event in evt_filter.get_all_entries():
            self._handle(event)

        evt_filter = self.contract.contract.events.SwapToken.createFilter(fromBlock=from_block, toBlock=to_block)
        for event in evt_filter.get_all_entries():
            self._handle(event)

        # for event_name in self.contract.tracked_event():
        #     for event in self.event_listener.events_in_range(event_name, from_block, to_block):
        #         self.logger.info(f'Found new event at block: {event["blockNumber"]}')
        SwapTrackerObject.update_last_processed('Ethereum', to_block)

    def _get_s20(self, foreign_token_addr: str) -> Token:
        """Resolve the SNIP-20 token that corresponds to a foreign token address."""
        # NOTE(review): debug print left in -- consider self.logger.debug instead.
        print(f"{list(self._coins.keys())} - {foreign_token_addr}")
        coin = self._coins.get(foreign_token_addr)
        return Token(address=coin.scrt_address, name=coin.name)

    def _retry(self, tx: Swap):
        # Drop stale signatures and requeue the swap with a fresh sequence number.
        for signature in Signatures.objects(tx_id=tx.id):
            signature.delete()
        tx.status = Status.SWAP_UNSIGNED
        tx.sequence = self.sequence
        tx.save()
        self.sequence = self.sequence + 1

    def _handle(self, event: AttributeDict):
        """Extracts tx data from @event and add unsigned_tx to db"""
        if not self.contract.verify_destination(event):
            return

        amount = str(self.contract.extract_amount(event))

        try:
            block_number, tx_hash, recipient, token = self.contract.parse_swap_event(event)
            if token is None:
                # No token address in the event: native ETH was swapped.
                token = 'native'
        except ValueError:
            return
        try:
            s20 = self._get_s20(token)
            mint = mint_json(amount, tx_hash, recipient, s20.address)
            unsigned_tx = create_unsigned_tx(
                self.config.scrt_swap_address, mint, self.config.chain_id,
                self.config.enclave_key, self.config.swap_code_hash,
                self.multisig.address
            )
            tx = Swap(src_tx_hash=tx_hash, status=Status.SWAP_UNSIGNED,
                      unsigned_tx=unsigned_tx, src_coin=token, dst_coin=s20.name,
                      dst_address=s20.address, src_network="Ethereum",
                      sequence=self.sequence, amount=amount)
            # force_insert makes a duplicate source tx hash raise NotUniqueError.
            tx.save(force_insert=True)
            self.sequence = self.sequence + 1
            self.logger.info(f"saved new Ethereum -> Secret transaction {tx_hash}, for {amount} {s20.name}")
            # SwapTrackerObject.update_last_processed(src=Source.ETH.value, update_val=block_number)
        except (IndexError, AttributeError, KeyError) as e:
            self.logger.error(f"Failed on tx {tx_hash}, block {block_number}, "
                              f"due to error: {e}")
        except RuntimeError as e:
            self.logger.error(f"Failed to create swap tx for eth hash {tx_hash}, block {block_number}. Error: {e}")
        except NotUniqueError as e:
            self.logger.error(f"Tried to save duplicate TX, might be a catch up issue - {e}")
        # return block_number, tx_hash, recipient, s20
        SwapTrackerObject.update_last_processed('Ethereum', block_number)

    def _account_details(self):
        # Query on-chain account info for the multisig address.
        details = account_info(self.multisig.address)
        return details["value"]["account_number"], details["value"]["sequence"]

    def update_sequence(self):
        # Refresh the cached account number and sequence from the chain.
        self.account_num, self.sequence = self._account_details()
from threading import Thread, Event, Lock
from typing import Union

from mongoengine.errors import NotUniqueError
from web3.datastructures import AttributeDict

from src.contracts.ethereum.event_listener import EthEventListener
from src.contracts.ethereum.multisig_wallet import MultisigWallet
from src.contracts.secret.secret_contract import mint_json
from src.db.collections.commands import Commands
from src.db.collections.eth_swap import Swap, Status
from src.db.collections.signatures import Signatures
from src.db.collections.swaptrackerobject import SwapTrackerObject
from src.signer.secret20.signer import SecretAccount
from src.util.coins import CoinHandler
from src.util.common import Token
from src.util.config import Config
from src.util.logger import get_logger
from src.util.secretcli import create_unsigned_tx, account_info
from src.util.web3 import w3


class SecretManager(Thread):
    """Registers to contract event and manages tx state in DB"""

    def __init__(
        self,
        contract: MultisigWallet,
        s20_multisig_account: SecretAccount,
        config: Config,
        **kwargs
    ):
        self.contract = contract
        self._coins = CoinHandler()
        self.config = config
        self.multisig = s20_multisig_account
        # Subscribes to Swap/SwapToken events from the Ethereum multisig contract.
        self.event_listener = EthEventListener(contract, config)

        self.logger = get_logger(
            db_name=config.db_name,
            loglevel=config.log_level,
            logger_name=config.logger_name or f"{self.__class__.__name__}-{self.multisig.name}"
        )
        self.stop_signal = Event()

        self.account_num = 0
        self.sequence_lock = Lock()
        self.sequence = 0
        # Pulls account number and sequence from the chain (network call).
        self.update_sequence()

        self.event_listener.register(self._handle, contract.tracked_event(),)
        super().__init__(group=None, name="SecretManager", target=self.run, **kwargs)

    def running(self):
        # True only while both the manager thread and its listener are alive.
        return self.is_alive() and self.event_listener.is_alive()

    @property
    def _sequence(self):
        return self.sequence

    @_sequence.setter
    def _sequence(self, val):
        # NOTE(review): the lock guards only this setter; other methods write
        # `self.sequence` directly and bypass it -- confirm intent.
        with self.sequence_lock:
            self.sequence = val

    def stop(self):
        self.logger.info("Stopping..")
        self.event_listener.stop()
        self.stop_signal.set()

    def run(self):
        """Scans for signed transactions and updates status if multisig threshold achieved"""
        self.logger.info("Starting..")

        # First replay any events up to the newest block considered final.
        to_block = w3.eth.blockNumber - self.config.eth_confirmations
        self.catch_up(to_block)
        self.event_listener.start()

        self.logger.info("Done catching up")

        while not self.stop_signal.is_set():
            # Requeue swaps flagged for retry before checking signatures.
            for transaction in Swap.objects(status=Status.SWAP_RETRY, src_network="Ethereum"):
                self._retry(transaction)

            for transaction in Swap.objects(status=Status.SWAP_UNSIGNED):
                self.handle_unsigned_tx(transaction)

            for transaction in Commands.objects(status=Status.SWAP_UNSIGNED):
                self.handle_unsigned_tx(transaction)

            self.stop_signal.wait(self.config.sleep_interval)

    def handle_unsigned_tx(self, transaction: Union[Commands, Swap]):
        # Mark SIGNED once the signature count reaches the multisig threshold.
        self.logger.debug(f"Checking unsigned tx {transaction.id}")
        if Signatures.objects(tx_id=transaction.id).count() >= self.config.signatures_threshold:
            self.logger.info(f"Found tx {transaction.id} with enough signatures to broadcast")
            transaction.status = Status.SWAP_SIGNED
            transaction.save()
            self.logger.info(f"Set status of tx {transaction.id} to signed")
        else:
            self.logger.debug(f"Tx {transaction.id} does not have enough signatures")

    def catch_up(self, to_block: int):
        """Replay contract events from the last processed block up to `to_block`."""
        from_block = SwapTrackerObject.last_processed('Ethereum') + 1
        self.logger.debug(f'Starting to catch up from block {from_block}')
        if self.config.eth_start_block > from_block:
            self.logger.debug(f'Due to config fast forwarding to block {self.config.eth_start_block}')
            from_block = self.config.eth_start_block
            SwapTrackerObject.update_last_processed('Ethereum', from_block)

        if to_block <= 0 or to_block < from_block:
            return

        self.logger.debug(f'Catching up to current block: {to_block}')

        evt_filter = self.contract.contract.events.Swap.createFilter(fromBlock=from_block, toBlock=to_block)
        for event in evt_filter.get_all_entries():
            self._handle(event)

        evt_filter = self.contract.contract.events.SwapToken.createFilter(fromBlock=from_block, toBlock=to_block)
        for event in evt_filter.get_all_entries():
            self._handle(event)

        # for event_name in self.contract.tracked_event():
        #     for event in self.event_listener.events_in_range(event_name, from_block, to_block):
        #         self.logger.info(f'Found new event at block: {event["blockNumber"]}')
        SwapTrackerObject.update_last_processed('Ethereum', to_block)

    def _get_s20(self, foreign_token_addr: str) -> Token:
        """Map a foreign (Ethereum) token address to its SNIP-20 counterpart."""
        # NOTE(review): stray debug print -- consider self.logger.debug instead.
        print(f"{list(self._coins.keys())} - {foreign_token_addr}")
        coin = self._coins.get(foreign_token_addr)
        return Token(address=coin.scrt_address, name=coin.name)

    def _retry(self, tx: Swap):
        # Wipe old signatures and put the swap back into the unsigned queue.
        for signature in Signatures.objects(tx_id=tx.id):
            signature.delete()
        tx.status = Status.SWAP_UNSIGNED
        tx.sequence = self.sequence
        tx.save()
        self.sequence = self.sequence + 1

    def _handle(self, event: AttributeDict):
        """Extracts tx data from @event and add unsigned_tx to db"""
        if not self.contract.verify_destination(event):
            return

        amount = str(self.contract.extract_amount(event))

        try:
            block_number, tx_hash, recipient, token = self.contract.parse_swap_event(event)
            if token is None:
                # Event carried no token address: a native ETH swap.
                token = 'native'
        except ValueError:
            return
        try:
            s20 = self._get_s20(token)
            mint = mint_json(amount, tx_hash, recipient, s20.address)
            unsigned_tx = create_unsigned_tx(
                self.config.scrt_swap_address, mint, self.config.chain_id,
                self.config.enclave_key, self.config.swap_code_hash,
                self.multisig.address
            )
            tx = Swap(src_tx_hash=tx_hash, status=Status.SWAP_UNSIGNED,
                      unsigned_tx=unsigned_tx, src_coin=token, dst_coin=s20.name,
                      dst_address=s20.address, src_network="Ethereum",
                      sequence=self.sequence, amount=amount)
            # force_insert: re-processing the same source tx raises NotUniqueError.
            tx.save(force_insert=True)
            self.sequence = self.sequence + 1
            self.logger.info(f"saved new Ethereum -> Secret transaction {tx_hash}, for {amount} {s20.name}")
            # SwapTrackerObject.update_last_processed(src=Source.ETH.value, update_val=block_number)
        except (IndexError, AttributeError, KeyError) as e:
            self.logger.error(f"Failed on tx {tx_hash}, block {block_number}, "
                              f"due to error: {e}")
        except RuntimeError as e:
            self.logger.error(f"Failed to create swap tx for eth hash {tx_hash}, block {block_number}. Error: {e}")
        except NotUniqueError as e:
            self.logger.error(f"Tried to save duplicate TX, might be a catch up issue - {e}")
        # return block_number, tx_hash, recipient, s20
        SwapTrackerObject.update_last_processed('Ethereum', block_number)

    def _account_details(self):
        # On-chain account info lookup for the multisig address.
        details = account_info(self.multisig.address)
        return details["value"]["account_number"], details["value"]["sequence"]

    def update_sequence(self):
        # Refresh cached account number and sequence from the chain.
        self.account_num, self.sequence = self._account_details()
# Copyright 2020-2021 Huawei Technologies Co., Ltd.All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mapper module."""
import numpy as np

from mindconverter.graph_based_converter.constant import ExchangeMessageKeywords, TemplateKeywords, \
    WeightType
from mindconverter.graph_based_converter.mapper.base import ONNXToMindSporeMapper


class AddMapper(ONNXToMindSporeMapper):
    """Map ONNX Add to MindSpore P.Add / a plain `+` expression."""

    @staticmethod
    def _operation_name_in_ms(*args, **kwargs):
        return "P.Add"

    @staticmethod
    def _convert_params(**kwargs):
        # Add takes no constructor parameters.
        return dict()

    @staticmethod
    def _convert_trained_weights(**kwargs):
        """Expose the first weight as a trainable 'bias' parameter.

        Only non-scalar ndarrays become named parameters; scalar weights are
        inlined into the generated code instead (see the template builder).
        """
        weights = kwargs.get('weights', list())
        tensor = AddMapper._find_val_by_index(0, weights)
        onnx_name = AddMapper._find_onnx_name_by_index(0, weights)
        if isinstance(tensor, np.ndarray) and tensor.shape:
            return {'bias': {'data': tensor, 'type': WeightType.PARAMETER.value, 'onnx_name': onnx_name}}
        return dict()

    @staticmethod
    def _generate_snippet_template(**kwargs):
        """Build the code template; with weights the add becomes `x + self.bias`."""
        template, exchange_msg, outputs_list, outputs_mapping = ONNXToMindSporeMapper._generate_snippet_template(
            **kwargs)
        op = kwargs.get("operation")
        args = kwargs.get("converted_params")
        weights = kwargs.get("weights")
        trainable_params = kwargs.get('trainable_params', dict())
        if not op:
            raise ValueError("Can not get MindSpore operation name.")
        if not weights:
            # No constant operand: emit a plain P.Add()(inputs) call.
            variable_slot = "var_0"
            construct_template = \
                f"opt_{{{variable_slot}}} = {op}()({{{ExchangeMessageKeywords.VariableScope.value.INPUTS.value}}})"
            template = {
                variable_slot: {
                    TemplateKeywords.INIT.value: [],
                    TemplateKeywords.CONSTRUCT.value: [construct_template]
                }
            }
            return template, exchange_msg, outputs_list, outputs_mapping
        return AddMapper._generate_snippet_template_with_weights(weights, args, op, trainable_params)

    @staticmethod
    def _generate_snippet_template_with_weights(weights, args, op, trainable_params):
        """Generate template when weights exist."""
        tensor = AddMapper._find_val_by_index(0, weights)
        bias_shape = tensor.shape
        bias_location = AddMapper._find_location_by_index(0, weights)

        variable_slot = "var_0"
        inputs_in_construct = [f"{{{ExchangeMessageKeywords.VariableScope.value.INPUTS.value}}}"]
        if bias_location != -1:
            # Preserve the operand order of the original ONNX node.
            inputs_in_construct.insert(bias_location, f"self.{{{variable_slot}}}_bias")

        if bias_shape:
            # Note: adding weight shape to args is now deprecated due to conflict of partial weights share processing.
            variable_slot_param_name = f"{variable_slot}/bias"
            init_tensor = f"self.{{{variable_slot}}}_bias = {{{variable_slot_param_name}}}"
        else:
            # Scalar constant: inline its value directly into the generated code.
            args["bias_value"] = tensor.tolist()
            init_tensor = f"self.{{{variable_slot}}}_bias = {{bias_value}}"

        # BUG FIX: was f"... = {" + ".join(inputs_in_construct)}" — double quotes
        # nested inside a double-quoted f-string, a SyntaxError before Python 3.12.
        # Single quotes produce the intended "a + b" join.
        construct_template = f"opt_{{{variable_slot}}} = {' + '.join(inputs_in_construct)}"
        template = {
            variable_slot: {
                TemplateKeywords.INIT.value: [init_tensor],
                TemplateKeywords.CONSTRUCT.value: [construct_template]
            }
        }
        exchange_msg = {
            variable_slot: {
                ExchangeMessageKeywords.VariableScope.value.OPERATION.value: op,
                ExchangeMessageKeywords.VariableScope.value.VARIABLE_NAME.value: None,
                ExchangeMessageKeywords.VariableScope.value.OUTPUT_TYPE.value:
                    ExchangeMessageKeywords.VariableScope.value.TSR_TYPE.value,
                ExchangeMessageKeywords.VariableScope.value.INPUTS.value: [],
                ExchangeMessageKeywords.VariableScope.value.ARGS.value: args,
                ExchangeMessageKeywords.VariableScope.value.WEIGHTS.value: weights,
                ExchangeMessageKeywords.VariableScope.value.TRAINABLE_PARAMS.value: trainable_params
            }
        }
        if bias_shape:
            exchange_msg[variable_slot][ExchangeMessageKeywords.VariableScope.value.PARAMETERS_DECLARED.value] = {
                "bias": ""
            }
        outputs_list = [f"opt_{{{variable_slot}}}"]
        outputs_mapping = ((0, 0),)
        return template, exchange_msg, outputs_list, outputs_mapping
# Copyright 2020-2021 Huawei Technologies Co., Ltd.All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mapper module."""
import numpy as np

from mindconverter.graph_based_converter.constant import ExchangeMessageKeywords, TemplateKeywords, \
    WeightType
from mindconverter.graph_based_converter.mapper.base import ONNXToMindSporeMapper


class AddMapper(ONNXToMindSporeMapper):
    """Add mapper."""
    # Maps an ONNX Add node to MindSpore: either a P.Add() call (no constant
    # operand) or a generated `x + self.bias` expression (constant operand).

    @staticmethod
    def _operation_name_in_ms(*args, **kwargs):
        return "P.Add"

    @staticmethod
    def _convert_params(**kwargs):
        # Add has no constructor parameters to convert.
        return dict()

    @staticmethod
    def _convert_trained_weights(**kwargs):
        # Only a non-scalar ndarray becomes a named 'bias' parameter; scalar
        # constants are inlined by the template builder instead.
        weights = kwargs.get('weights', list())
        tensor = AddMapper._find_val_by_index(0, weights)
        onnx_name = AddMapper._find_onnx_name_by_index(0, weights)
        if isinstance(tensor, np.ndarray) and tensor.shape:
            return {'bias': {'data': tensor, 'type': WeightType.PARAMETER.value, 'onnx_name': onnx_name}}
        return dict()

    @staticmethod
    def _generate_snippet_template(**kwargs):
        # Start from the base-class template, then specialise depending on
        # whether the node carries a constant operand ("weights").
        template, exchange_msg, outputs_list, outputs_mapping = ONNXToMindSporeMapper._generate_snippet_template(
            **kwargs)
        op = kwargs.get("operation")
        args = kwargs.get("converted_params")
        weights = kwargs.get("weights")
        trainable_params = kwargs.get('trainable_params', dict())
        if not op:
            raise ValueError("Can not get MindSpore operation name.")
        if not weights:
            # No constant operand: emit a plain P.Add()(inputs) construct line.
            variable_slot = "var_0"
            construct_template = \
                f"opt_{{{variable_slot}}} = {op}()({{{ExchangeMessageKeywords.VariableScope.value.INPUTS.value}}})"
            template = {
                variable_slot: {
                    TemplateKeywords.INIT.value: [],
                    TemplateKeywords.CONSTRUCT.value: [construct_template]
                }
            }
            return template, exchange_msg, outputs_list, outputs_mapping
        return AddMapper._generate_snippet_template_with_weights(weights, args, op, trainable_params)

    @staticmethod
    def _generate_snippet_template_with_weights(weights, args, op, trainable_params):
        """Generate template when weights exist."""
        tensor = AddMapper._find_val_by_index(0, weights)
        bias_shape = tensor.shape
        bias_location = AddMapper._find_location_by_index(0, weights)

        variable_slot = "var_0"
        inputs_in_construct = [f"{{{ExchangeMessageKeywords.VariableScope.value.INPUTS.value}}}"]
        if bias_location != -1:
            # Insert the bias operand at its original ONNX position.
            inputs_in_construct.insert(bias_location, f"self.{{{variable_slot}}}_bias")

        if bias_shape:
            # Note: adding weight shape to args is now deprecated due to conflict of partial weights share processing.
            variable_slot_param_name = f"{variable_slot}/bias"
            init_tensor = f"self.{{{variable_slot}}}_bias = {{{variable_slot_param_name}}}"
        else:
            # Scalar constant: inline its value into the generated init line.
            args["bias_value"] = tensor.tolist()
            init_tensor = f"self.{{{variable_slot}}}_bias = {{bias_value}}"

        # Joins operands into e.g. "opt_{var_0} = {inputs} + self.{var_0}_bias".
        construct_template = f"opt_{{{variable_slot}}} = {' + '.join(inputs_in_construct)}"
        template = {
            variable_slot: {
                TemplateKeywords.INIT.value: [init_tensor],
                TemplateKeywords.CONSTRUCT.value: [construct_template]
            }
        }
        exchange_msg = {
            variable_slot: {
                ExchangeMessageKeywords.VariableScope.value.OPERATION.value: op,
                ExchangeMessageKeywords.VariableScope.value.VARIABLE_NAME.value: None,
                ExchangeMessageKeywords.VariableScope.value.OUTPUT_TYPE.value:
                    ExchangeMessageKeywords.VariableScope.value.TSR_TYPE.value,
                ExchangeMessageKeywords.VariableScope.value.INPUTS.value: [],
                ExchangeMessageKeywords.VariableScope.value.ARGS.value: args,
                ExchangeMessageKeywords.VariableScope.value.WEIGHTS.value: weights,
                ExchangeMessageKeywords.VariableScope.value.TRAINABLE_PARAMS.value: trainable_params
            }
        }
        if bias_shape:
            # Declare 'bias' so the generator emits a Parameter for it.
            exchange_msg[variable_slot][ExchangeMessageKeywords.VariableScope.value.PARAMETERS_DECLARED.value] = {
                "bias": ""
            }
        outputs_list = [f"opt_{{{variable_slot}}}"]
        outputs_mapping = ((0, 0),)
        return template, exchange_msg, outputs_list, outputs_mapping
import indicators.rsi.rsi_settings as rsi_settings
from indicators.rsi.rsi_obj import RSI
import ochlv_db.db_settings as db_settings
import sqlite3
import pandas as pd
from tqdm import tqdm
import time
from datetime import datetime

# Module-level stop flag; another thread/module sets this True to end rsi_fn.
stop_streaming = False


def rsi_fn(n='-------rsi_idle-------'):
    """Stream new OCHLV candles into the RSI indicator until `stop_streaming` is set.

    Polls the Bitfinex 15m OCHLV sqlite store once a minute, feeds every candle
    newer than the indicator's last processed timestamp into the RSI object and
    flushes the result. `n` is only a console tag identifying this worker loop.
    """
    kwargs = {
        'path': rsi_settings.db_path,
        'table_name': rsi_settings.table_name,
        'table_list': rsi_settings.table_list,
        'update_tdiff': rsi_settings.update_tdiff,
    }
    kwargs.update(rsi_settings.settings)
    rsi = RSI(**kwargs)
    rsi.update_latest()
    while not stop_streaming:
        print(n)
        last_ts = rsi.get_latest_time(None, rsi_settings.table_name, "TIMESTAMP")
        if last_ts is None:
            last_ts = 0
        con_ochlv = sqlite3.connect(db_settings.db_path)
        try:
            df = pd.read_sql_query(
                f"SELECT TIMESTAMP, close from Bitfinex_OCHLV_15m WHERE TIMESTAMP >= {last_ts}",
                con_ochlv)
        finally:
            # BUG FIX: the connection was previously never closed — one leaked
            # sqlite handle per minute for the lifetime of the process.
            con_ochlv.close()
        c = df.close.values
        ts_vals = df.TIMESTAMP.values
        if len(ts_vals) > 0:
            for i in tqdm(range(len(c))):
                if ts_vals[i] > last_ts:
                    rsi.update(ts_vals[i], {'close': c[i]})
            rsi.flush()
            # BUG FIX: strftime("...") previously nested double quotes inside a
            # double-quoted f-string — a SyntaxError before Python 3.12.
            print(n, f"Finished update. Last at {datetime.utcfromtimestamp(ts_vals[-1]/1000).strftime('%Y-%m-%d %H:%M:%S')} local datetime.")
        time.sleep(1 * 60)
import indicators.rsi.rsi_settings as rsi_settings
from indicators.rsi.rsi_obj import RSI
import ochlv_db.db_settings as db_settings
import sqlite3
import pandas as pd
from tqdm import tqdm
import time
from datetime import datetime

# Global flag polled once per cycle; set True elsewhere to stop the worker.
stop_streaming = False


def rsi_fn(n='-------rsi_idle-------'):
    """Feed new 15m OCHLV candles into the RSI indicator in an endless poll loop.

    `n` is just a console tag so this worker's output can be told apart from
    other idle loops.
    """
    indicator_kwargs = dict(
        path=rsi_settings.db_path,
        table_name=rsi_settings.table_name,
        table_list=rsi_settings.table_list,
        update_tdiff=rsi_settings.update_tdiff,
    )
    indicator_kwargs.update(rsi_settings.settings)
    indicator = RSI(**indicator_kwargs)
    indicator.update_latest()

    while stop_streaming == False:
        print(n)
        last_ts = indicator.get_latest_time(None, rsi_settings.table_name, "TIMESTAMP")
        last_ts = 0 if last_ts is None else last_ts

        connection = sqlite3.connect(db_settings.db_path)
        frame = pd.read_sql_query(
            f"SELECT TIMESTAMP, close from Bitfinex_OCHLV_15m WHERE TIMESTAMP >= {last_ts}",
            connection)
        closes = frame.close.values
        stamps = frame.TIMESTAMP.values

        if len(stamps) > 0:
            # Only candles strictly newer than the last processed one are fed in.
            for stamp, close_px in tqdm(list(zip(stamps, closes))):
                if stamp > last_ts:
                    indicator.update(stamp, {'close': close_px})
            indicator.flush()
            print(n, f"Finished update. Last at {datetime.utcfromtimestamp(stamps[-1]/1000).strftime('%Y-%m-%d %H:%M:%S')} local datetime.")

        time.sleep(1 * 60)
# -*- coding: utf-8 -*-
#https://www.pythoncentral.io/series/python-sqlalchemy-database-tutorial/
# Module preamble: path setup, framework imports and module identity constants
# for the ganimides database-adapter API module.
import os
import sys
# Make both the parent package directory and this directory importable when the
# module is run outside the normal package layout.
if not (os.path.dirname(os.path.dirname(__file__)) in sys.path):
    sys.path.append(os.path.dirname(os.path.dirname(__file__)))
if not (os.path.dirname(__file__) in sys.path):
    sys.path.append(os.path.dirname(__file__))
import datetime
import decimal
#
#
# Application framework: shared app object plus logging/configuration helpers.
from _serverApp import thisApp
from _serverApp import get_debug_option_as_level, get_debug_files, log_message, retrieve_module_configuration, get_globals_from_configuration, save_module_configuration,get_module_debug_level
from _serverApp import log_process_start, log_process_finish, log_process_message, log_process_result,log_process_data, log_process_input, log_process_output
from _serverApp import set_process_identity_dict, set_process_caller_area, add_apis_to_configuration
from _serverApp import build_process_signature, build_process_call_area, get_debug_level, get_debug_files
import _database_ganimides_model as dbmodel
#import ganimides_openBankingAPI as bankingapi
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::: module :::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Module identity constants consumed by the logging/configuration framework.
module_Function = 'database adapter'
module_ProgramName = 'database api'
module_BaseTimeStamp = datetime.datetime.now()
# NOTE(review): module_folder and module_ProgramName are overwritten just below
# with __file__-derived values; these first assignments are dead.
module_folder = os.getcwd()
module_color = thisApp.Fore.LIGHTMAGENTA_EX
module_folder = os.path.dirname(__file__)
module_ProgramName = os.path.splitext(os.path.basename(__file__))[0]
module_id = f'{module_ProgramName}'
module_eyecatch = module_ProgramName
module_version = 0.1
module_log_file_name = module_ProgramName+'.log'
module_errors_file_name = os.path.splitext(os.path.basename(module_log_file_name))[0]+'_errors.log'
module_versionString = f'{module_id} version {module_version}'
module_file = __file__
log_file=thisApp.log_file_name
# Console/file logging toggles are inherited from the application globals.
print_enabled = thisApp.CONSOLE_ON and thisApp.DEBUG_ON
consolelog_enabled = thisApp.CONSOLE_ON and thisApp.DEBUG_ON
filelog_enabled = thisApp.FILELOG_ON
module_is_externally_configurable = True
# Identity dictionary handed to the framework so log lines can be attributed
# to this module.
module_identityDictionary = {
    'module_file':__file__,
    'module_Function':module_Function,
    'module_ProgramName':module_ProgramName,
    'module_BaseTimeStamp':module_BaseTimeStamp,
    'module_folder':module_folder,
    'module_color':module_color,
    'module_id':module_id,
    'module_eyecatch':module_eyecatch,
    'module_version':module_version,
    'module_versionString':module_versionString,
    'module_log_file_name':module_log_file_name,
    'module_errors_file_name': module_errors_file_name,
    'consolelog_enabled': consolelog_enabled,
    'filelog_enabled': filelog_enabled,
    'module_is_externally_configurable':module_is_externally_configurable,
}
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# configuration
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Externally-saved configuration for this module (populated by the framework).
module_configuration = {
}
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# api services : database apis
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_device(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Generic DEVICE table API: delegates REGISTER/UNREGISTER, otherwise runs
    `action` against dbmodel.DEVICE via dbsession.table_action.

    Returns the table_action result dict augmented with 'api_action'/'api_name'.
    NOTE(review): the mutable default arguments are never mutated here, but are
    an accident waiting to happen — consider `action_filter=None` sentinels.
    """
    _api_name = "dbapi_device"
    _api_entity = 'DEVICE'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID,**_process_call_area)
    # BUG FIX: was `if action.upper in (...)` — a bound method is never equal to
    # a string, so the REGISTER/UNREGISTER dispatch branch was unreachable.
    if action.upper() in ('REGISTER','UNREGISTER'):
        return dbapi_device_register_unregister(dbsession, action, input_dict, action_filter, caller_area=_process_call_area)
    log_process_input('', 'input_dict', input_dict,**_process_call_area)
    log_process_input('', 'action_filter', action_filter,**_process_call_area)
    log_process_input('', 'caller_area', caller_area,**_process_call_area)
    action_result = dbsession.table_action(dbmodel.DEVICE, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result = action_result
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_device_register_unregister(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Register or unregister a device for a client/application combination."""
    _api_name="dbapi_device_register_unregister"
    _api_entity = 'DEVICE'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID,**_process_call_area)
    log_process_input('', 'input_dict', input_dict,**_process_call_area)
    log_process_input('', 'action_filter', action_filter,**_process_call_area)
    log_process_input('', 'caller_area', caller_area,**_process_call_area)
    actions_supported=('REGISTER', 'UNREGISTER')
    now = datetime.datetime.utcnow()
    # Reject anything but the two supported actions.
    if action.upper() not in actions_supported:
        msg = f"action '{action}' not supported. {actions_supported}"
        api_result = {'api_status': 'error', 'api_message': msg, 'api_data': actions_supported, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    device = dbsession.get(dbmodel.DEVICE, input_dict, caller_area=_process_call_area)
    if not device:
        # NOTE(review): `device` is falsy here, so calling a method on it will
        # raise AttributeError before the error result is built — confirm
        # whether this should call dbmodel.DEVICE (the class) instead.
        device_record = device.valid_fields_dictionary(input_dict)
        msg = f"invalid device"
        api_result = {'api_status': 'error', 'api_message': msg, 'api_data': device_record, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    client = dbsession.get(dbmodel.CLIENT, input_dict, caller_area=_process_call_area)
    if not client:
        # NOTE(review): same falsy-object method call issue as `device` above.
        client_record = client.valid_fields_dictionary(input_dict)
        msg = f"invalid client"
        api_result = {'api_status': 'error', 'api_message': msg, 'api_data': client_record, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    client_id = client.client_id
    # NOTE(review): `in ('REGISTER')` is substring membership on a string, not a
    # one-element tuple test; it works for this exact value but is fragile.
    if action.upper() in ('REGISTER'):
        status = 'Registered'
        xx = 'for'
    else:
        status = 'UnRegistered'
        xx = 'from'
    registered_apps=[]
    # Wildcard: apply the status change to every application of this client+device.
    if input_dict.get('applications', '').upper() in ('*', 'ALL') \
       or input_dict.get('application', '').upper() in ('*', 'ALL') \
       or input_dict.get('application_name', '').upper() in ('*', 'ALL'):
        CLIENT_DEVICE = dbmodel.CLIENT_DEVICE
        # Only rows whose status actually needs to change.
        client_devices = dbsession.query(CLIENT_DEVICE).filter(CLIENT_DEVICE.device_uid == device.device_uid, CLIENT_DEVICE.client_id == client_id, CLIENT_DEVICE.status != status).all()
        if len(client_devices) <= 0:
            # Nothing to change: report the current rows as already in `status`.
            client_devices = dbsession.query(CLIENT_DEVICE).filter(CLIENT_DEVICE.device_uid == device.device_uid, CLIENT_DEVICE.client_id == client_id).all()
            client_device_records = dbsession.rows_to_dict(CLIENT_DEVICE, client_devices)
            msg = f"device already {status.upper()} {xx} usage by all applications"
            api_result = {'api_status': 'success', 'api_message': msg, 'api_data': client_device_records, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        for client_device in client_devices:
            client_device.status = status
            application = dbsession.get(dbmodel.APPLICATION, {'application_name': client_device.application_name}, caller_area=_process_call_area)
            registered_apps.append(application.application_name)
        dbsession.commit(**_process_call_area)
        client_device_records = dbsession.rows_to_dict(CLIENT_DEVICE, client_devices)
    else:
        # Single-application path.
        application = dbsession.get(dbmodel.APPLICATION, input_dict, caller_area=_process_call_area)
        if not application:
            # NOTE(review): same falsy-object method call issue as above.
            application_record = application.valid_fields_dictionary(input_dict)
            msg = f"invalid application"
            api_result = {'api_status': 'error', 'api_message': msg, 'api_data': application_record, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        client_device_record = {'device_uid': device.device_uid, 'client_id': client_id, 'application_name': application.application_name, 'last_usage_timestamp': now, 'status': status}
        client_device = dbsession.get(dbmodel.CLIENT_DEVICE, client_device_record, caller_area=_process_call_area)
        if client_device:
            if client_device.status == status:
                # Idempotent call: row already has the requested status.
                msg = f"device already {client_device.status.upper()} {xx} usage by application '{client_device.application_name}'"
                client_device_records = [client_device.to_dict()]
                api_result = {'api_status': 'success', 'api_message': msg, 'api_data': client_device_records, 'api_action': _api_action.upper(), 'api_name': _api_name}
                log_process_finish(_api_msgID, api_result, **_process_call_area)
                return api_result
        client_device = dbsession.refresh(dbmodel.CLIENT_DEVICE, client_device_record, auto_commit=True, caller_area=_process_call_area)
        registered_apps.append(application.application_name)
        client_device_records = [client_device.to_dict()]
    row_count = len(client_device_records)
    x=''
    if row_count > 1:
        x = 's'
    msg = f"device {status.upper()} {xx} usage by application{x} {registered_apps}"
    api_result = {'api_status': 'success', 'api_message': msg, 'api_data': client_device_records, 'api_data_rows': row_count, 'api_action': _api_action.upper(), 'api_name':_api_name }
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_device_usage(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Run `action` against the DEVICE_USAGE table via dbsession.table_action."""
    _api_name = "dbapi_device_usage"
    _api_entity = 'DEVICE'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs,
                                                 **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID,**_process_call_area)
    log_process_input('', 'input_dict', input_dict,**_process_call_area)
    log_process_input('', 'action_filter', action_filter,**_process_call_area)
    log_process_input('', 'caller_area', caller_area,**_process_call_area)
    action_result = dbsession.table_action(dbmodel.DEVICE_USAGE, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result = action_result
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_client(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """CLIENT table API: handles confirmation email/sms actions specially, then
    propagates UPDATE/REFRESH/etc. changes to the client's dependent tables."""
    # NOTE(review): "dbapi_cient" is a typo for "dbapi_client"; it is a runtime
    # string returned as 'api_name', so fixing it may affect callers — confirm.
    _api_name = "dbapi_cient"
    _api_entity = 'CLIENT'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID,**_process_call_area)
    log_process_input('', 'input_dict', input_dict,**_process_call_area)
    log_process_input('', 'action_filter', action_filter,**_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)
    # Special actions: (re)send a confirmation email or sms to the client.
    if action.upper().replace('_', '-') in ('SEND-CONFIRMATION-EMAIL', 'SEND-CONFIRMATION-SMS'):
        client = dbsession.get(dbmodel.CLIENT, action_filter, caller_area=_process_call_area)
        if not client:
            msg = f'client not found'
            api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        client_dict=client.to_dict()
        if client.confirmed and client.status=='Active':
            msg = f'client {client.email} already confirmed'
            api_result = {'api_status': 'success', 'api_message': msg, 'api_data': client_dict}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        if action.upper().replace('_', '-') in ('SEND-CONFIRMATION-EMAIL'):
            if not client.email:
                msg = f'email is missing'
                api_result = {'api_status': 'error', 'api_message': msg, 'api_data': client_dict}
                log_process_finish(_api_msgID, api_result, **_process_call_area)
                return api_result
            # NOTE(review): the actual send is stubbed out; `ok` is forced True.
            #ok = send_confirmation_email(client.email)
            ok=True
            if ok:
                confirm_filter = {'email': client.email, 'mobile': ''}
                confirmation = dbsession.get(dbmodel.CLIENT_CONFIRMATION, confirm_filter, caller_area=_process_call_area)
                if not confirmation:
                    confirm_dict = {'email': client.email, 'mobile': '', 'status': 'Sent'}
                    xapi_result = dbsession.table_action(dbmodel.CLIENT_CONFIRMATION, 'refresh' , confirm_dict, {}, auto_commit=True, caller_area=_process_call_area)
                # NOTE(review): if `confirmation` is falsy the next line reads
                # .status on it and will raise — confirm intended flow.
                if not confirmation.status=='Confirmed':
                    confirm_dict = {'email': client.email, 'mobile': '', 'status': 'Sent', 'send_timestamp': datetime.datetime.utcnow()}
                    xapi_result = dbsession.table_action(dbmodel.CLIENT_CONFIRMATION, 'refresh' , confirm_dict, {}, auto_commit=True, caller_area=_process_call_area)
                msg = f'OK. a confirmation email sent to {client.email}'
                api_result = {'api_status': 'success', 'api_message': msg, 'api_data': client_dict}
                api_result.update({'api_action': _api_action, 'api_name': _api_name})
                log_process_finish(_api_msgID, api_result, **_process_call_area)
                return api_result
            else:
                msg = f'FAILED to send confirmation email to {client.email}. retry'
                api_result = {'api_status': 'error', 'api_message': msg, 'api_data': client_dict}
                api_result.update({'api_action': _api_action, 'api_name': _api_name})
                log_process_finish(_api_msgID, api_result, **_process_call_area)
                return api_result
        if action.upper().replace('_', '-') in ('SEND-CONFIRMATION-SMS'):
            if not client.mobile:
                msg = f'mobile has not been defined'
                api_result = {'api_status': 'error', 'api_message': msg, 'api_data': client_dict}
                log_process_finish(_api_msgID, api_result, **_process_call_area)
                return api_result
            # NOTE(review): sms send is also stubbed out; `ok` is forced True.
            #ok = send_confirmation_sms(client.mobile)
            ok=True
            if ok:
                confirm_filter = {'mobile': client.mobile, 'email': ''}
                confirmation = dbsession.get(dbmodel.CLIENT_CONFIRMATION, confirm_filter, caller_area=_process_call_area)
                if not confirmation:
                    confirm_dict = {'mobile': client.mobile, 'email': '', 'status': 'Sent'}
                    xapi_result = dbsession.table_action(dbmodel.CLIENT_CONFIRMATION, 'refresh' , confirm_dict, {}, auto_commit=True, caller_area=_process_call_area)
                # NOTE(review): same falsy-`confirmation` attribute access as the
                # email branch above.
                if not confirmation.status=='Confirmed':
                    confirm_dict = {'mobile': client.mobile, 'email': '', 'status': 'Sent', 'send_timestamp': datetime.datetime.utcnow()}
                    xapi_result = dbsession.table_action(dbmodel.CLIENT_CONFIRMATION, 'refresh' , confirm_dict, {}, auto_commit=True, caller_area=_process_call_area)
                msg = f'OK. a confirmation sms sent to {client.mobile}'
                api_result = {'api_status': 'success', 'api_message': msg, 'api_data': client_dict}
                api_result.update({'api_action': _api_action, 'api_name': _api_name})
                log_process_finish(_api_msgID, api_result, **_process_call_area)
                return api_result
            else:
                msg = f'FAILED to send confirmation sms to {client.mobile}. retry'
                api_result = {'api_status': 'error', 'api_message': msg, 'api_data': client_dict}
                api_result.update({'api_action': _api_action, 'api_name': _api_name})
                log_process_finish(_api_msgID, api_result, **_process_call_area)
                return api_result
    # Default path: run the requested action against the CLIENT table itself.
    api_result = dbsession.table_action(dbmodel.CLIENT, action , input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    if not api_result.get('api_status') == 'success':
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    # Propagate client status/contact changes into the dependent role tables.
    if action.upper() in ('UPDATE', 'REFRESH', 'REGISTER', 'ACTIVATE', 'DEACTIVATE', 'CONFIRM'):
        client_dict = api_result.get('api_data', {})
        client_id = client_dict.get('client_id')
        client_type = client_dict.get('client_type')
        update_dict = {
            #'client_id': client_dict.get('client_id'),
            'status': client_dict.get('status'),
            'email': client_dict.get('email'),
            'confirmed': client_dict.get('confirmed'),
            'client_status': client_dict.get('status'),
            'client_email': client_dict.get('email'),
            'client_mobile': client_dict.get('mobile'),
            'client_name': client_dict.get('name'),
            'client_confirmed': client_dict.get('confirmed'),
            'confirmed_timestamp': client_dict.get('confirmed_timestamp'),
        }
        xaction = 'update_rows'
        action_filter = {'client_id': client_id}
        if client_id and client_type:
            # Mirror the change into the table matching the client's role.
            if client_type == 'merchant':
                xapi_result = dbsession.table_action(dbmodel.MERCHANT, xaction , update_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
            elif client_type == 'subscriber':
                xapi_result = dbsession.table_action(dbmodel.SUBSCRIPTION, xaction , update_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
            elif client_type == 'customer_service_assistant':
                xapi_result = dbsession.table_action(dbmodel.CUSTOMER_SERVICE_ASSISTANT, xaction , update_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
            # APPLICATION_USER rows are refreshed for every client type.
            xapi_result = dbsession.table_action(dbmodel.APPLICATION_USER, xaction , update_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    # Disabled draft: on CONFIRM, also mark the CLIENT_CONFIRMATION row confirmed.
    # if action.upper() in ('CONFIRM'):
    #     if input_dict.get('mobile_confirmation_sms'):
    #         # client_dict = api_result.get('api_data', {})
    #         mobile=client_dict.get('mobile')
    #         confirm_filter = {'mobile': mobile, 'email': ''}
    #         confirmation = dbsession.get(dbmodel.CLIENT_CONFIRMATION, confirm_filter, caller_area=_process_call_area)
    #         if not confirmation:
    #             confirm_dict = {'mobile': mobile, 'email': '', 'status': 'Confirmed', 'confirmed_timestamp': datetime.datetime.utcnow(),'confirmed':1}
    #             xapi_result = dbsession.table_action(dbmodel.CLIENT_CONFIRMATION, 'refresh' , confirm_dict, {}, auto_commit=True, caller_area=_process_call_area)
    #         if not confirmation.status=='Confirmed':
    #             confirm_dict = {'mobile': mobile, 'email': '', 'status': 'Confirmed', 'confirmed_timestamp': datetime.datetime.utcnow(),'confirmed':1}
    #             xapi_result = dbsession.table_action(dbmodel.CLIENT_CONFIRMATION, 'refresh' , confirm_dict, {}, auto_commit=True, caller_area=_process_call_area)
    #     elif input_dict.get('email_confirmation_email'):
    #         # client_dict = api_result.get('api_data', {})
    #         email=client_dict.get('email')
    #         confirm_filter = {'mobile': '', 'email': email}
    #         confirmation = dbsession.get(dbmodel.CLIENT_CONFIRMATION, confirm_filter, caller_area=_process_call_area)
    #         if not confirmation:
    #             confirm_dict = {'mobile': '', 'email': email, 'status': 'Confirmed', 'confirmed_timestamp': datetime.datetime.utcnow(),'confirmed':1}
    #             xapi_result = dbsession.table_action(dbmodel.CLIENT_CONFIRMATION, 'refresh' , confirm_dict, {}, auto_commit=True, caller_area=_process_call_area)
    #         if not confirmation.status=='Confirmed':
    #             confirm_dict = {'mobile': '', 'email': email, 'status': 'Confirmed', 'confirmed_timestamp': datetime.datetime.utcnow(),'confirmed':1}
    #             xapi_result = dbsession.table_action(dbmodel.CLIENT_CONFIRMATION, 'refresh' , confirm_dict, {}, auto_commit=True, caller_area=_process_call_area)
    #     else:
    #         # client_dict = api_result.get('api_data', {})
    #         email=client_dict.get('email')
    #         confirm_filter = {'mobile': '', 'email': email}
    #         confirmation = dbsession.get(dbmodel.CLIENT_CONFIRMATION, confirm_filter, caller_area=_process_call_area)
    #         if not confirmation:
    #             confirm_dict = {'mobile': '', 'email': email, 'status': 'Confirmed', 'confirmed_timestamp': datetime.datetime.utcnow(),'confirmed':1}
    #             xapi_result = dbsession.table_action(dbmodel.CLIENT_CONFIRMATION, 'refresh' , confirm_dict, {}, auto_commit=True, caller_area=_process_call_area)
    #         if not confirmation.status=='Confirmed':
    #             confirm_dict = {'mobile': '', 'email': email, 'status': 'Confirmed', 'confirmed_timestamp': datetime.datetime.utcnow(),'confirmed':1}
    #             xapi_result = dbsession.table_action(dbmodel.CLIENT_CONFIRMATION, 'refresh' , confirm_dict, {}, auto_commit=True, caller_area=_process_call_area)
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_client_device(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """CLIENT_DEVICE table API; REGISTER/UNREGISTER delegate to the device API."""
    _api_name = "dbapi_client_device"
    _api_entity = 'DEVICE'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs,
**_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) if action.upper in ('REGISTER','UNREGISTER'): return dbapi_device_register_unregister(dbsession, action, input_dict, action_filter, caller_area=_process_call_area) log_process_input('', 'input_dict', input_dict,**_process_call_area) log_process_input('', 'action_filter', action_filter,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) action_result = dbsession.table_action(dbmodel.CLIENT_DEVICE, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area) api_result = action_result api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_verification(dbsession, action, input_dict, action_filter={}, caller_area={}): _api_name = "dbapi_verification" _api_entity = 'VERIFICATION' _api_action = action _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = 
get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) log_process_input('', 'input_dict', input_dict,**_process_call_area) log_process_input('', 'action_filter', action_filter,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) action_result = dbsession.table_action(dbmodel.VERIFICATION, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area) api_result = action_result api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_email_confirmation(dbsession, input_dict, action_filter={}, caller_area={}): _api_name = "dbapi_email_confirmation" _api_entity = 'email' _api_action = 'confirm' _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) 
_process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) log_process_input('', 'input_dict', input_dict,**_process_call_area) log_process_input('', 'action_filter', action_filter,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) verification = dbsession.get(dbmodel.VERIFICATION, input_dict, caller_area=_process_call_area) if not verification: msg = f'email verification failed' api_result = {'api_status': 'error', 'api_message': msg} api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result if not verification.status=='Confirmed': action_filter = {'verification_id': verification.verification_id} update_dict = {'status': 'Confirmed', 'verified': 1, 'verification_timestamp': datetime.datetime.utcnow()} action_result = dbsession.table_action(dbmodel.VERIFICATION,'update', update_dict, action_filter, auto_commit=True, caller_area=_process_call_area) api_result = action_result api_result.update({'api_action': _api_action, 'api_name': _api_name}) client_id = verification.client_id client=dbsession.get(dbmodel.CLIENT, {'client_id':client_id}, caller_area=_process_call_area) if not client: msg = f'mobile verification failed (client_id not found)' api_result = {'api_status': 'error', 'api_message': msg} api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result if not client.email_confirmed or not client.confirmed: update_record = {'client_id': client_id, 'email_confirmed': 1, 'email_confirmed_timestamp': datetime.datetime.utcnow(), 
'confirmed': 1} dbreply = dbsession.table_action(dbmodel.CLIENT, 'update', update_record, {'client_id': client_id}, auto_commit=True, caller_area=_process_call_area) client_rec=client.to_dict() if not dbreply.get('api_status')=='success': msg = f'email verification failed (client update failed)' api_result = {'api_status': 'error', 'api_message': msg, 'api_data': client_rec} api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result else: msg = f'OK. client email confirmed' api_result = {'api_status': 'success', 'api_message': msg,'api_data':client_rec} api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result else: client_rec=client.to_dict() msg = f'OK. client email already confirmed' api_result = {'api_status': 'success', 'api_message': msg,'api_data':client_rec} api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_mobile_confirmation(dbsession, input_dict, action_filter={}, caller_area={}): _api_name = "dbapi_mobile_confirmation" _api_entity = 'mobile' _api_action = 'confirm' _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) 
_process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) log_process_input('', 'input_dict', input_dict,**_process_call_area) log_process_input('', 'action_filter', action_filter,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) verification = dbsession.get(dbmodel.VERIFICATION, input_dict, caller_area=_process_call_area) if not verification: msg = f'mobile verification failed' api_result = {'api_status': 'error', 'api_message': msg} api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result if not verification.status=='Confirmed': action_filter = {'verification_id': verification.verification_id} update_dict = {'status': 'Confirmed', 'verified': 1, 'verification_timestamp': datetime.datetime.utcnow()} action_result = dbsession.table_action(dbmodel.VERIFICATION,'update', update_dict, action_filter, auto_commit=True, caller_area=_process_call_area) api_result = action_result api_result.update({'api_action': _api_action, 'api_name': _api_name}) client_id = verification.client_id client=dbsession.get(dbmodel.CLIENT, {'client_id':client_id}, caller_area=_process_call_area) if not client: msg = f'mobile verification failed (client_id not found)' api_result = {'api_status': 'error', 'api_message': msg} api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result if not client.mobile_confirmed or not client.confirmed: update_record = {'client_id': client_id, 'mobile_confirmed': 1, 'mobile_confirmed_timestamp': datetime.datetime.utcnow(), 
'confirmed': 1} dbreply = dbsession.table_action(dbmodel.CLIENT, 'update', update_record, {'client_id': client_id}, auto_commit=True, caller_area=_process_call_area) client_rec=client.to_dict() if not dbreply.get('api_status')=='success': msg = f'mobile verification failed (client update failed)' api_result = {'api_status': 'error', 'api_message': msg, 'api_data': client_rec} api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result else: msg = f'OK. client mobile confirmed' api_result = {'api_status': 'success', 'api_message': msg,'api_data':client_rec} api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result else: client_rec=client.to_dict() msg = f'OK. client mobile already confirmed' api_result = {'api_status': 'success', 'api_message': msg,'api_data':client_rec} api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_api(dbsession, action, input_dict, action_filter={}, caller_area={}): _api_name = "dbapi_api" _api_entity = 'API' _api_action = action _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) 
_process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) if action.upper() in ('REGISTER', 'UNREGISTER'): api_result = dbapi_api_register_unregister(dbsession, action, input_dict, action_filter, caller_area=_process_call_area) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result log_process_input('', 'input_dict', input_dict,**_process_call_area) log_process_input('', 'action_filter', action_filter,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) action_result = dbsession.table_action(dbmodel.API, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area) api_result = action_result api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_api_register_unregister(dbsession, action, input_dict, action_filter={}, caller_area={}): _api_name = "dbapi_api_register_unregister" _api_entity = 'APPLICATION API' _api_action = action _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, 
**_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) if action.upper() not in ('REGISTER','UNREGISTER'): msg = f'invalid action [[{action}]] requested. use REGISTER or UNREGISTER' api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result log_process_input('', 'input_dict', input_dict,**_process_call_area) log_process_input('', 'action_filter', action_filter,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) if _api_action.upper() == 'REGISTER': api=dbsession.get(dbmodel.API, input_dict, caller_area=_process_call_area) if not api: msg = f'api not found' api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result if not api.status=='Active': msg = f'api not Active' api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result application=dbsession.get(dbmodel.APPLICATION, input_dict, caller_area=_process_call_area) if not application: msg = f'application not found' api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result if not application.status=='Active': msg = f'application not Active' api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return 
api_result input_dict.update({'api_id':api.api_id}) input_dict.update({'api_name':api.api_name}) input_dict.update({'application_id': application.application_id}) input_dict.update({'application_name': application.application_name}) input_dict.update({'subscription_id': application.subscription_id}) action_filter={} api_registered = dbsession.get(dbmodel.APPLICATION_API, input_dict, caller_area=_process_call_area) if api_registered: input_dict.update({'application_api_id': api_registered.application_api_id}) action_filter = {'application_api_id': api_registered.application_api_id} input_dict.update({'status': 'Active'}) action='REFRESH' action_result = dbsession.table_action(dbmodel.APPLICATION_API, action , input_dict, action_filter, auto_commit=True, caller_area=_process_call_area) api_result = action_result api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result elif _api_action.upper() == 'UNREGISTER': api=dbsession.get(dbmodel.API, input_dict, caller_area=_process_call_area) if api: input_dict.update({'api_id':api.api_id}) input_dict.update({'api_name':api.api_name}) application=dbsession.get(dbmodel.APPLICATION, input_dict, caller_area=_process_call_area) if application: input_dict.update({'application_id': application.application_id}) input_dict.update({'application_name': application.application_name}) api_registered = dbsession.get(dbmodel.APPLICATION_API, input_dict, caller_area=_process_call_area) if not api_registered: msg = f'record not found' api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result input_dict.update({'application_api_id': api_registered.application_api_id}) input_dict.update({'status':'Unregistered'}) action_filter={'application_api_id': api_registered.application_api_id} action='UPDATE' action_result = 
dbsession.table_action(dbmodel.APPLICATION_API, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area) api_result = action_result api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result else: msg = f'invalid action [[{action}]] requested. use REGISTER or UNREGISTER' api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_application(dbsession, action, input_dict, action_filter={}, caller_area={}): _api_name = "dbapi_application" _api_entity = 'APPLICATION' _api_action = action _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) if action.upper() in ('API_REGISTER', 'API_UNREGISTER'): xaction=action.upper().replace('API_','') return dbapi_api_register_unregister(dbsession, xaction, input_dict, action_filter, 
caller_area=_process_call_area) log_process_input('', 'input_dict', input_dict,**_process_call_area) log_process_input('', 'action_filter', action_filter,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) if action.upper() == 'VALIDATE' or action.upper() == 'VALIDATE_CREDENTIALS': application_name=input_dict.get('application_name') if not application_name: application_name=action_filter.get('application_name') client_id=input_dict.get('client_id') if not client_id: client_id=input_dict.get('application_client_id') if not client_id: client_id=action_filter.get('client_id') if not client_id: client_id=action_filter.get('application_client_id') client_secretKey = input_dict.get('client_secretKey') if not client_secretKey: client_secretKey=input_dict.get('application_client_secretKey') if not client_secretKey: client_secretKey=action_filter.get('client_secretKey') if not client_secretKey: client_secretKey=action_filter.get('application_client_secretKey') return dbapi_application_credentials_are_valid(dbsession, application_name, client_id, client_secretKey) if action.upper() in ('ADD','INSERT','REGISTER','REFRESH'): if not input_dict.get('application_name'): msg = f'application name not defined' api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result if not input_dict.get('subscription_id') and not input_dict.get('client_id'): msg = f'subscription not defined' api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result subscription = dbsession.get(dbmodel.SUBSCRIPTION, input_dict, caller_area=_process_call_area) if not subscription: client=dbsession.get(dbmodel.CLIENT, input_dict, caller_area=_process_call_area) if client: input_dict.update({'client_id': client.client_id}) subscription = 
dbsession.get(dbmodel.SUBSCRIPTION, input_dict, caller_area=_process_call_area) if not subscription: msg = f'subscription not found' api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result if not subscription.status=='Active': msg = f'subscription not Active' api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result app_rec={'application_name':input_dict.get('application_name')} application = dbsession.get(dbmodel.APPLICATION, app_rec, caller_area=_process_call_area) if application: if not application.subscription_id == subscription.subscription_id: msg = f'application {application.application_name} already in used. try another name' api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result subscription_record = subscription.to_dict() input_dict.update(subscription_record) client=dbsession.get(dbmodel.CLIENT, input_dict, caller_area=_process_call_area) if not client: msg = f'client not found' api_result = {'api_status': 'systemerror', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result if not client.status=='Active': msg = f'client not Active' api_result = {'api_status': 'system error', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result action_result = dbsession.table_action(dbmodel.APPLICATION, action , input_dict, action_filter, auto_commit=True, caller_area=_process_call_area) api_result = action_result api_result.update({'api_action': _api_action, 'api_name': _api_name}) if api_result.get('api_status') == 'success': user_dict=api_result.get('api_data') user_dict.update({'user_role':'owner'}) 
dbapi_application_USER(dbsession, 'register', user_dict,caller_area=_process_call_area) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_application_credentials_are_valid(dbsession, application_name, client_id, client_secretKey ,caller_area={}): _api_name = "dbapi_application_credentials_are_valid" _api_entity = 'APPLICATION' _api_action = 'validation' _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) log_process_input('', 'application_name', application_name,**_process_call_area) log_process_input('', 'client_id', client_id,**_process_call_area) log_process_input('', 'client_secretKey', client_secretKey,**_process_call_area) application=dbsession.get(dbmodel.APPLICATION, {'application_name': application_name}, caller_area=_process_call_area) if not application: api_result=False else: if not application.client_id == client_id or not application.client_secretKey == client_secretKey: api_result=False else: api_result=True 
log_process_result(_api_msgID, api_result, data_name='application_credentials_are_valid', **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_application_api(dbsession, action, input_dict, action_filter={}, caller_area={}): _api_name = "dbapi_application_api" _api_entity = 'APPLICATION_API' _api_action = action _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) log_process_input('', 'input_dict', input_dict,**_process_call_area) log_process_input('', 'action_filter', action_filter,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) action_result = dbsession.table_action(dbmodel.APPLICATION_API, action , input_dict, action_filter, auto_commit=True, caller_area=_process_call_area) api_result = action_result api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result 
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_application_USER(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """API wrapper for actions on the APPLICATION_USER table.

    For REGISTER/UNREGISTER/ADD/REFRESH the linked CLIENT and APPLICATION
    records are resolved first and their fields merged into ``input_dict``
    (NOTE: mutates the caller's dict) before the table action runs.
    Returns the table-action result dict enriched with api_action/api_name.
    """
    _api_name = "dbapi_application_user"
    _api_entity = 'APPLICATION_USER'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard process/logging scaffolding shared by all dbapi_* entry points.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    log_process_input('', 'input_dict', input_dict, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)

    if action.upper() in ('REGISTER', 'UNREGISTER', 'ADD', 'REFRESH'):
        if not input_dict.get('user_role'):
            msg = 'user role not defined'
            log_process_message('', 'error', msg, **_process_call_area)
            api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        client_id = input_dict.get('client_id')
        if client_id:
            client = dbsession.get(dbmodel.CLIENT, {'client_id': client_id}, caller_area=_process_call_area)
        else:
            # No client_id supplied: create/refresh the CLIENT row from input_dict.
            client = dbsession.refresh(dbmodel.CLIENT, input_dict, auto_commit=True, caller_area=_process_call_area)
        if not client:
            msg = 'client not found'
            log_process_message('', 'error', msg, **_process_call_area)
            api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        if not client.status == 'Active':
            # Deliberately only a warning: an inactive client does not block the action.
            msg = f"client not Active.(status:{client.status})"
            log_process_message('', 'warning', msg, **_process_call_area)
        application_name = input_dict.get('application_name')
        application = dbsession.get(dbmodel.APPLICATION, {'application_name': application_name}, caller_area=_process_call_area)
        if not application:
            # Fall back to a lookup by application_id.
            application_id = input_dict.get('application_id')
            application = dbsession.get(dbmodel.APPLICATION, {'application_id': application_id}, caller_area=_process_call_area)
        if not application:
            msg = 'application not found'
            log_process_message('', 'error', msg, **_process_call_area)
            api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        if not application.status == 'Active':
            msg = f"application not Active.(status:{application.status})"
            log_process_message('', 'error', msg, **_process_call_area)
            api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        # Merge the resolved application and client fields into the input record.
        input_dict.update(application.to_dict())
        input_dict.update(client.to_dict())

    action_result = dbsession.table_action(dbmodel.APPLICATION_USER, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result = action_result
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result

#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_application_template(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """API wrapper for actions on the APPLICATION_TEMPLATE table.

    Thin pass-through: runs the requested table action and tags the result
    with api_action/api_name.  (NOTE: _api_name/"TEMPLATE" labels are kept
    as in the original even though the table is APPLICATION_TEMPLATE.)
    """
    _api_name = "dbapi_template"
    _api_entity = 'TEMPLATE'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard process/logging scaffolding shared by all dbapi_* entry points.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    log_process_input('', 'input_dict', input_dict, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)

    action_result = dbsession.table_action(dbmodel.APPLICATION_TEMPLATE, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result = action_result
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result

#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_token(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """API wrapper for actions on the TOKEN table (plain pass-through)."""
    _api_name = "dbapi_token"
    _api_entity = 'TOKEN'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard process/logging scaffolding shared by all dbapi_* entry points.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    log_process_input('', 'input_dict', input_dict, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)

    action_result = dbsession.table_action(dbmodel.TOKEN, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result = action_result
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result

#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_subscription(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """API wrapper for actions on the SUBSCRIPTION table.

    A subscription is backed by a CLIENT row (client_type='subscriber'):
    register-like actions first REFRESH the client; lifecycle actions
    (CONFIRM/ACTIVATE/DEACTIVATE/DELETE) are applied to the client first
    and the resulting status propagated to the subscription.
    Mutates ``input_dict`` with resolved client/user fields.
    """
    _api_name = "dbapi_subscription"
    _api_entity = 'SUBSCRIPTION'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard process/logging scaffolding shared by all dbapi_* entry points.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    log_process_input('', 'input_dict', input_dict, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)

    input_dict.update({'client_type': 'subscriber'})
    # FIX: thismsg was previously referenced unconditionally at the end even on
    # action paths that never assigned it (NameError); initialise and guard it.
    thismsg = None
    if action.upper() in ('REGISTER', 'ADD', 'REFRESH'):
        # Link an existing USER if one matches; otherwise record an empty user_id.
        user = dbsession.get(dbmodel.USER, input_dict, caller_area=_process_call_area)
        user_id = user.user_id if user else ''
        input_dict.update({'user_id': user_id})
        # The backing client record is always refreshed for register-like actions.
        xaction = 'REFRESH'
        action_result = dbsession.table_action(dbmodel.CLIENT, xaction, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
        api_result = action_result
        thismsg = action_result.get('api_message')
        api_result.update({'api_action': _api_action, 'api_name': _api_name})
        if not api_result.get('api_status') == 'success':
            msg = "subscription not registered. client record create failed"
            log_process_message(_api_msgID, 'error', msg, **_process_call_area)
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        client = api_result.get('api_data')
        input_dict.update({'client_id': client.get('client_id')})
        input_dict.update({'client_secretKey': client.get('client_secretKey')})
    elif action.upper() in ('CONFIRM', 'ACTIVATE', 'DEACTIVATE', 'DELETE'):
        subscription_dict = dbsession.get(dbmodel.SUBSCRIPTION, input_dict, 'DICT', caller_area=_process_call_area)
        if not subscription_dict:
            msg = 'subscription not found'
            api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        client = dbsession.get(dbmodel.CLIENT, subscription_dict, '', caller_area=_process_call_area)
        if not client:
            msg = 'client not found'
            api_result = {'api_status': 'error', 'api_message': msg, 'api_data': subscription_dict, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        client_id = client.client_id
        input_dict.update({'client_id': client_id})
        input_dict.update({'client_secretKey': client.client_secretKey})
        # Apply the lifecycle action to the backing client first.
        api_result = dbsession.table_action(dbmodel.CLIENT, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
        api_result.update({'api_action': _api_action, 'api_name': _api_name})
        thismsg = api_result.get('api_message')
        if not api_result.get('api_status') == 'success':
            msg = f'action {action.upper()} on client {client_id} failed'
            log_process_message(_api_msgID, 'error', msg, **_process_call_area)
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        # Propagate the client's new status onto the subscription record.
        client_dict = api_result.get('api_data', {})
        subscription_dict = dbsession.get(dbmodel.SUBSCRIPTION, subscription_dict, 'DICT', caller_area=_process_call_area)
        input_dict.update({'status': client_dict.get('status')})
        input_dict.update({'client_id': subscription_dict.get('client_id')})

    action_result = dbsession.table_action(dbmodel.SUBSCRIPTION, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result = action_result
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    if thismsg is not None:
        # Reuse the CLIENT-action message, rebadged for the SUBSCRIPTION entity.
        api_result.update({'api_message': thismsg.replace('CLIENT', _api_entity)})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result

#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_user(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """API wrapper for actions on the USER table (plain pass-through)."""
    _api_name = "dbapi_user"
    _api_entity = 'USER'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard process/logging scaffolding shared by all dbapi_* entry points.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    log_process_input('', 'input_dict', input_dict, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)

    api_result = dbsession.table_action(dbmodel.USER, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    # FIX (consistency): previously the failure path returned without the
    # api_action/api_name tags every sibling endpoint attaches.
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result

#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_merchant(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """API wrapper for actions on the MERCHANT table.

    BANKACCOUNT-flavoured actions are dispatched to the dedicated helpers.
    A merchant is backed by a CLIENT row (client_type='merchant'):
    register-like actions REFRESH the client first; lifecycle actions are
    applied to the client and the status propagated to the merchant.
    Mutates ``input_dict`` with resolved client fields.
    """
    _api_name = "dbapi_merchant"
    _api_entity = 'MERCHANT'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard process/logging scaffolding shared by all dbapi_* entry points.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)

    # Dispatch bank-account sub-actions to the dedicated helpers.
    if (action.upper().find('BANKACCOUNT') >= 0 and action.upper().find('GET') >= 0) or action.upper() in ('BANKACCOUNTS', 'BANKACCOUNT'):
        return dbapi_merchant_get_bankaccounts(dbsession, input_dict, action_filter, caller_area=_process_call_area)
    elif action.upper().find('BANKACCOUNT') >= 0 and action.upper().find('REGISTER') >= 0:
        return dbapi_merchant_bankaccount_register(dbsession, input_dict, action_filter, caller_area=_process_call_area)

    log_process_input('', 'input_dict', input_dict, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)
    input_dict.update({'client_type': 'merchant'})
    if action.upper() in ('REGISTER', 'ADD', 'REFRESH'):
        # The backing client record is always refreshed for register-like actions.
        action = 'REFRESH'
        action_result = dbsession.table_action(dbmodel.CLIENT, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
        api_result = action_result
        api_result.update({'api_action': _api_action, 'api_name': _api_name})
        if not api_result.get('api_status') == 'success':
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        client = api_result.get('api_data')
        input_dict.update({'client_id': client.get('client_id')})
    elif action.upper() in ('CONFIRM', 'ACTIVATE', 'DEACTIVATE', 'DELETE'):
        merchant_dict = dbsession.get(dbmodel.MERCHANT, input_dict, 'DICT', caller_area=_process_call_area)
        if not merchant_dict:
            msg = 'merchant not found'
            api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        client_dict = dbsession.get(dbmodel.CLIENT, merchant_dict, 'DICT', caller_area=_process_call_area)
        if not client_dict:
            msg = 'client not found'
            api_result = {'api_status': 'error', 'api_message': msg, 'api_data': merchant_dict, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        # Apply the lifecycle action to the backing client first.
        action_result = dbsession.table_action(dbmodel.CLIENT, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
        api_result = action_result
        api_result.update({'api_action': _api_action, 'api_name': _api_name})
        if not api_result.get('api_status') == 'success':
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        # Re-read the merchant and propagate the (possibly cascaded) status.
        merchant_dict = dbsession.get(dbmodel.MERCHANT, merchant_dict, 'DICT', caller_area=_process_call_area)
        input_dict.update({'status': merchant_dict.get('status')})
        input_dict.update({'client_id': merchant_dict.get('client_id')})

    action_result = dbsession.table_action(dbmodel.MERCHANT, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result = action_result
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result

#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_retail_store(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """API wrapper for actions on the RETAIL_STORE table.

    Register-like actions require an Active parent MERCHANT; its merchant_id
    is copied into ``input_dict`` (mutated) and status defaults to 'Active'.
    """
    _api_name = "dbapi_retail_store"
    _api_entity = 'RETAIL_STORE'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard process/logging scaffolding shared by all dbapi_* entry points.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    log_process_input('', 'input_dict', input_dict, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)

    if action.upper() in ('REGISTER', 'ADD', 'REFRESH'):
        merch_rec = {'merchant_id': input_dict.get('merchant_id')}
        merchant_name = input_dict.get('merchant_name')
        if merchant_name:
            merch_rec.update({'merchant_name': merchant_name})
        merchant = dbsession.get(dbmodel.MERCHANT, merch_rec, caller_area=_process_call_area)
        if not merchant:
            msg = 'merchant not found'
            log_process_message('', 'error', msg, **_process_call_area)
            api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        if not merchant.status == 'Active':
            msg = f"merchant not Active.(status:{merchant.status})"
            log_process_message('', 'error', msg, **_process_call_area)
            api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        input_dict.update({'merchant_id': merchant.merchant_id})
        if not input_dict.get('status'):
            input_dict.update({'status': 'Active'})

    action_result = dbsession.table_action(dbmodel.RETAIL_STORE, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result = action_result
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result

#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_pointofsale(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """API wrapper for actions on the POINT_OF_SALE table.

    BANKACCOUNT-flavoured actions are dispatched to the dedicated helpers.
    Register-like actions require an Active parent RETAIL_STORE and an
    Active MERCHANT; their ids are copied into ``input_dict`` (mutated).
    """
    _api_name = "dbapi_pointofsale"
    _api_entity = 'POINT_OF_SALE'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard process/logging scaffolding shared by all dbapi_* entry points.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)

    # FIX: the dispatch was inverted — ADD/REGISTER called the *remove* helper
    # and REMOVE/DELETE called the *add* helper.  Route each to its own helper.
    if action.upper().find('BANKACCOUNT') >= 0 and (action.upper().find('ADD') >= 0 or action.upper().find('REGISTER') >= 0):
        return dbapi_pointofsale_bankaccount_add(dbsession, input_dict, action_filter, caller_area=_process_call_area)
    elif action.upper().find('BANKACCOUNT') >= 0 and (action.upper().find('REMOVE') >= 0 or action.upper().find('DELETE') >= 0):
        return dbapi_pointofsale_bankaccount_remove(dbsession, input_dict, action_filter, caller_area=_process_call_area)

    log_process_input('', 'input_dict', input_dict, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)
    if action.upper() in ('REGISTER', 'ADD', 'REFRESH'):
        store_rec = {'retail_store_id': input_dict.get('retail_store_id')}
        retail_store = dbsession.get(dbmodel.RETAIL_STORE, store_rec, caller_area=_process_call_area)
        if not retail_store:
            msg = 'retail_store not found'
            log_process_message('', 'error', msg, **_process_call_area)
            api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        if not retail_store.status == 'Active':
            msg = f"retail_store not Active.(status:{retail_store.status})"
            log_process_message('', 'error', msg, **_process_call_area)
            api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        input_dict.update({'retail_store_id': retail_store.retail_store_id})
        # The store's parent merchant must also exist and be Active.
        merch_rec = {'merchant_id': retail_store.merchant_id}
        merchant = dbsession.get(dbmodel.MERCHANT, merch_rec, caller_area=_process_call_area)
        if not merchant:
            msg = 'merchant not found'
            log_process_message('', 'error', msg, **_process_call_area)
            api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        if not merchant.status == 'Active':
            msg = f"merchant not Active.(status:{merchant.status})"
            log_process_message('', 'error', msg, **_process_call_area)
            api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        input_dict.update({'merchant_id': merchant.merchant_id})
        if not input_dict.get('status'):
            input_dict.update({'status': 'Active'})

    action_result = dbsession.table_action(dbmodel.POINT_OF_SALE, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result = action_result
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_service_point(dbsession, action, input_dict, action_filter={}, caller_area={}): _api_name = "dbapi_service_point" _api_entity = 'SERVICE_POINT' _api_action = action _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) log_process_input('', 'input_dict', input_dict,**_process_call_area) log_process_input('', 'action_filter', action_filter,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) if action.upper() in ('REGISTER', 'ADD', 'REFRESH'): retail_store_id = input_dict.get('retail_store_id') store_rec={'retail_store_id':retail_store_id} retail_store = dbsession.get(dbmodel.RETAIL_STORE, store_rec, caller_area=_process_call_area) if not retail_store: msg = f'retail_store not found' log_process_message('', 'error', msg, **_process_call_area) api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return 
api_result if not retail_store.status == 'Active': msg = f"retail_store not Active.(status:{retail_store.status})" log_process_message('', 'error', msg, **_process_call_area) api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result input_dict.update({'retail_store_id': retail_store.retail_store_id}) merchant_id = retail_store.merchant_id merch_rec = {'merchant_id': merchant_id} merchant = dbsession.get(dbmodel.MERCHANT, merch_rec, caller_area=_process_call_area) if not merchant: msg = f'merchant not found' log_process_message('', 'error', msg, **_process_call_area) api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result if not merchant.status == 'Active': msg = f"merchant not Active.(status:{merchant.status})" log_process_message('', 'error', msg, **_process_call_area) api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result input_dict.update({'merchant_id': merchant.merchant_id}) if not input_dict.get('status'): input_dict.update({'status': 'Active'}) action_result = dbsession.table_action(dbmodel.SERVICE_POINT, action , input_dict, action_filter, auto_commit=True, caller_area=_process_call_area) api_result = action_result api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_customer_service_assistant(dbsession, action, input_dict, action_filter={}, caller_area={}): _api_name = "dbapi_customer_service_assistant" _api_entity = 
'CUSTOMER_SERVICE_ASSISTANT' _api_action = action _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) log_process_input('', 'input_dict', input_dict,**_process_call_area) log_process_input('', 'action_filter', action_filter,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) if action.upper() in ('REGISTER', 'ADD', 'REFRESH'): merchant_id=input_dict.get('merchant_id') merch_rec = {'merchant_id': merchant_id} merchant = dbsession.get(dbmodel.MERCHANT, merch_rec, caller_area=_process_call_area) if not merchant: msg = f'merchant not found' log_process_message('', 'error', msg, **_process_call_area) api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result if not merchant.status == 'Active': msg = f"merchant not Active.(status:{merchant.status})" log_process_message('', 'error', msg, **_process_call_area) api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': 
_api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result if not input_dict.get('status'): input_dict.update({'status': 'Active'}) #client if input_dict.get('email'): action='REFRESH' input_dict.update({'client_type': 'customer_service_assistant'}) action_result = dbsession.table_action(dbmodel.CLIENT, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area) api_result = action_result api_result.update({'api_action': _api_action, 'api_name': _api_name}) thismsg=action_result.get('api_message') if not api_result.get('api_status') == 'success': log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result client = api_result.get('api_data') client_id = client.get('client_id') input_dict.update({'client_id': client_id}) action_result = dbsession.table_action(dbmodel.CUSTOMER_SERVICE_ASSISTANT, action , input_dict, action_filter, auto_commit=True, caller_area=_process_call_area) api_result = action_result api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_merchant_get_bankaccounts(dbsession, merchant_record, action_filter={}, caller_area={}): _api_name = "dbapi_merchant_get_bankaccounts" _api_entity = 'MERCHANT' _api_action = 'get_bank_accounts' _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, 
**_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) log_process_input('', 'input_dict', merchant_record,**_process_call_area) log_process_input('', 'action_filter', action_filter,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) merchant = dbsession.get(dbmodel.MERCHANT, merchant_record, caller_area=_process_call_area) if not merchant: msg = f'merchant not found' log_process_message('', 'error', msg, **_process_call_area) api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result if not merchant.status == 'Active': msg = f"merchant not Active.(status:{merchant.status})" log_process_message('', 'error', msg, **_process_call_area) api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result client_id = merchant.client_id merchant_accounts=[] filterJson = {"client_id": client_id, "status": 'Active'} bank_accounts=dbsession.get_rows(dbmodel.BANK_ACCOUNT, filterJson, caller_area=_process_call_area) if bank_accounts: msg = f'[{len(bank_accounts)} bank accounts found] for merchant [{merchant.name}] client_id [{client_id}]' log_process_message('', 'success', msg, **_process_call_area) for bank_account in bank_accounts: bank_account_id = str(bank_account.bank_account_id) bank_accountID = str(bank_account.bank_accountID) merchant_accounts.append(bank_account_id) msg = f'OK. 
def dbapi_merchant_bankaccount_register(dbsession, bankaccount_record, action_filter=None, caller_area=None):
    """Attach an existing Active bank account to a merchant.

    Resolves the bank account from any of the accepted identifier keys in
    bankaccount_record ('bank_account_id', 'bank_accountID', 'bank_account'),
    verifies both merchant and bank account exist and are Active, then copies
    the bank account's fields onto the MERCHANT row via an UPDATE action.

    :param dbsession: database session adapter.
    :param bankaccount_record: dict identifying both the merchant and the
                               bank account; mutated with the resolved ids.
    :param action_filter: filter dict forwarded to table_action.
    :param caller_area: caller context used to build the process call area.
    :return: api_result dict from the table action (or an error result).
    """
    # Normalize defaults locally instead of using shared mutable default args.
    action_filter = {} if action_filter is None else action_filter
    caller_area = {} if caller_area is None else caller_area
    _api_name = "dbapi_merchant_bankaccount_register"
    _api_entity = 'MERCHANT'
    _api_action = 'register_bank_account'
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    log_process_input('', 'input_dict', bankaccount_record, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)
    merchant = dbsession.get(dbmodel.MERCHANT, bankaccount_record, caller_area=_process_call_area)
    if not merchant:
        msg = 'merchant not found'
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    if not merchant.status == 'Active':
        msg = f"merchant not Active.(status:{merchant.status})"
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    # FIX: the original repeated the same lookup code three times, once per
    # accepted identifier key; a data-driven loop preserves the exact order.
    # NOTE(review): the resolver is called with caller_area={} rather than the
    # current call area (as in the original) — confirm this is intentional.
    bank_account_id = None
    for id_key in ('bank_account_id', 'bank_accountID', 'bank_account'):
        account_id = bankaccount_record.get(id_key)
        if account_id:
            bank_account_id = dbapi_get_bank_account_id(dbsession, account_id, caller_area={})
            if bank_account_id:
                break
    if not bank_account_id:
        msg = "bank_account not found"
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    bankaccount_record.update({'bank_account_id': bank_account_id})
    bankaccount_record.update({'merchant_id': merchant.merchant_id})
    bank_account = dbsession.get(dbmodel.BANK_ACCOUNT, {'bank_account_id': bank_account_id}, caller_area=_process_call_area)
    if not bank_account:
        msg = "bank account not found"
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    if not bank_account.status == 'Active':
        msg = f"bank account {bank_account_id} not Active (status:{bank_account.status})"
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    # Copy the bank account's fields onto the merchant row and persist.
    merchant_record = bank_account.to_dict()
    merchant_record.update({'merchant_id': merchant.merchant_id})
    api_result = dbsession.table_action(dbmodel.MERCHANT, 'UPDATE', merchant_record, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
def dbapi_pointofsale_bankaccount_add(dbsession, bankaccount_record, action_filter=None, caller_area=None):
    """Attach an existing Active bank account to a point of sale.

    Resolves the bank account from any of the accepted identifier keys in
    bankaccount_record ('bank_account_id', 'bank_accountID', 'bank_account'),
    verifies both point of sale and bank account exist and are Active, then
    copies the bank account's fields onto the POINT_OF_SALE row via UPDATE.

    :param dbsession: database session adapter.
    :param bankaccount_record: dict identifying both the point of sale and
                               the bank account; mutated with resolved ids.
    :param action_filter: filter dict forwarded to table_action.
    :param caller_area: caller context used to build the process call area.
    :return: api_result dict from the table action (or an error result).
    """
    # Normalize defaults locally instead of using shared mutable default args.
    action_filter = {} if action_filter is None else action_filter
    caller_area = {} if caller_area is None else caller_area
    _api_name = "dbapi_pointofsale_bankaccount_add"
    _api_entity = 'POINT_OF_SALE'
    _api_action = 'add_bank_account'
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    log_process_input('', 'input_dict', bankaccount_record, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)
    pointofsale = dbsession.get(dbmodel.POINT_OF_SALE, bankaccount_record, caller_area=_process_call_area)
    if not pointofsale:
        msg = 'pointofsale not found'
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    if not pointofsale.status == 'Active':
        msg = f"pointofsale not Active.(status:{pointofsale.status})"
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    # FIX: the original repeated the same lookup code three times, once per
    # accepted identifier key; a data-driven loop preserves the exact order.
    # NOTE(review): the resolver is called with caller_area={} rather than the
    # current call area (as in the original) — confirm this is intentional.
    bank_account_id = None
    for id_key in ('bank_account_id', 'bank_accountID', 'bank_account'):
        account_id = bankaccount_record.get(id_key)
        if account_id:
            bank_account_id = dbapi_get_bank_account_id(dbsession, account_id, caller_area={})
            if bank_account_id:
                break
    if not bank_account_id:
        msg = "bank_account not found"
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    bankaccount_record.update({'bank_account_id': bank_account_id})
    bankaccount_record.update({'pointofsale_id': pointofsale.pointofsale_id})
    bank_account = dbsession.get(dbmodel.BANK_ACCOUNT, {'bank_account_id': bank_account_id}, caller_area=_process_call_area)
    if not bank_account:
        msg = "bank account not found"
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    if not bank_account.status == 'Active':
        msg = f"bank account {bank_account_id} not Active (status:{bank_account.status})"
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    # Copy the bank account's fields onto the point-of-sale row and persist.
    pointofsale_record = bank_account.to_dict()
    pointofsale_record.update({'pointofsale_id': pointofsale.pointofsale_id})
    api_result = dbsession.table_action(dbmodel.POINT_OF_SALE, 'UPDATE', pointofsale_record, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
def dbapi_pointofsale_bankaccount_remove(dbsession, pointofsale_record, action_filter={}, caller_area={}):
    """Detach all banking fields from a point of sale.

    Blanks the bank-account related columns of the POINT_OF_SALE row
    (bank_account_id, subscription ids, bank code, currency) via an UPDATE
    action. Fails with an error api_result if the point of sale is missing
    or not Active.
    """
    _api_name = "dbapi_pointofsale_bankaccount_remove"
    _api_entity = 'POINT_OF_SALE'
    _api_action = 'remove_bank_account'
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    for label, value in (('input_dict', pointofsale_record), ('action_filter', action_filter), ('caller_area', caller_area)):
        log_process_input('', label, value, **_process_call_area)
    pointofsale = dbsession.get(dbmodel.POINT_OF_SALE, pointofsale_record, caller_area=_process_call_area)
    # Single guard for both failure modes: missing row or inactive row.
    failure = None
    if not pointofsale:
        failure = 'pointofsale not found'
    elif pointofsale.status != 'Active':
        failure = f"pointofsale not Active.(status:{pointofsale.status})"
    if failure:
        log_process_message('', 'error', failure, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': failure, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    # Blank out every banking-related column on the point-of-sale record.
    pointofsale_record = pointofsale.to_dict()
    cleared_fields = ('bank_account_id', 'bank_subscription_id', 'bank_code', 'bank_subscriptionID', 'bank_accountID', 'payments_currency')
    pointofsale_record.update({field: '' for field in cleared_fields})
    api_result = dbsession.table_action(dbmodel.POINT_OF_SALE, 'UPDATE', pointofsale_record, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
def dbapi_pointofsale_credit_info(dbsession, pointofsale_record, action_filter=None, caller_area=None):
    """Return the banking/credit details for a point of sale.

    If the point of sale has no bank_accountID of its own, the details are
    taken from its owning MERCHANT row instead. The returned api_result's
    'api_data' holds the credit_info dict (ids, bank code, subscription ids,
    payments currency).

    :param dbsession: database session adapter.
    :param pointofsale_record: dict used to look up the POINT_OF_SALE row.
    :param action_filter: filter dict (only logged by this api).
    :param caller_area: caller context used to build the process call area.
    """
    # Normalize defaults locally instead of using shared mutable default args.
    action_filter = {} if action_filter is None else action_filter
    caller_area = {} if caller_area is None else caller_area
    _api_name = "dbapi_pointofsale_credit_info"
    _api_entity = 'POINT_OF_SALE'
    _api_action = 'get_pointofsale_credit_info'
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    log_process_input('', 'input_dict', pointofsale_record, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)
    pointofsale = dbsession.get(dbmodel.POINT_OF_SALE, pointofsale_record, caller_area=_process_call_area)
    if not pointofsale:
        msg = 'pointofsale not found'
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    if not pointofsale.status == 'Active':
        msg = f"pointofsale not Active.(status:{pointofsale.status})"
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    pointofsale_id = pointofsale.pointofsale_id
    merchant_id = pointofsale.merchant_id
    pointofsale_name = pointofsale.name
    bank_account_id = pointofsale.bank_account_id
    bank_subscription_id = pointofsale.bank_subscription_id
    bank_code = pointofsale.bank_code
    bank_subscriptionID = pointofsale.bank_subscriptionID
    bank_accountID = pointofsale.bank_accountID
    payments_currency = pointofsale.payments_currency
    # FIX: dropped the original's dead `pointofsale_record = pointofsale.to_dict()`
    # assignment — the result was never used.
    if not bank_accountID:
        # Fall back to the owning merchant's banking details.
        # NOTE(review): assumes the parent MERCHANT row exists; a missing row
        # would raise AttributeError here — confirm upstream guarantees this.
        merchant = dbsession.get(dbmodel.MERCHANT, {'merchant_id': merchant_id}, caller_area=_process_call_area)
        bank_account_id = merchant.bank_account_id
        bank_subscription_id = merchant.bank_subscription_id
        bank_code = merchant.bank_code
        bank_subscriptionID = merchant.bank_subscriptionID
        bank_accountID = merchant.bank_accountID
        payments_currency = merchant.payments_currency
        x = ' from merchant'
    else:
        x = ' from point_of_sale'
    credit_info = {
        'pointofsale_id': pointofsale_id,
        'pointofsale_name': pointofsale_name,
        'bank_account_id': bank_account_id,
        'bank_subscription_id': bank_subscription_id,
        'bank_code': bank_code,
        'bank_subscriptionID': bank_subscriptionID,
        'bank_accountID': bank_accountID,
        'payments_currency': payments_currency,
    }
    msg = f"OK. pointofsale credit info retrieved [{x}]"
    log_process_message('', 'success', msg, **_process_call_area)
    api_result = {'api_status': 'success', 'api_message': msg, 'api_data': credit_info, 'api_action': _api_action.upper(), 'api_name': _api_name}
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
def dbapi_interaction(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Generic INTERACTION api entry point.

    Lifecycle actions are dispatched to the dedicated helpers
    (start/finish/message); any other action is executed as a direct
    table action on the INTERACTION table.

    :param dbsession: database session adapter.
    :param action: action verb (e.g. 'START', 'FINISH', 'CREATE', ...).
    :param input_dict: record payload for the action.
    :param action_filter: filter dict forwarded to table_action.
    :param caller_area: caller context used to build the process call area.
    """
    _api_name = "dbapi_interaction"
    _api_entity = 'INTERACTION'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    action_key = action.upper()
    if action_key in ('START', 'REQUEST'):
        return dbapi_interaction_start(dbsession, input_dict, caller_area=caller_area)
    if action_key in ('ACCEPT', 'FINISH'):
        # FIX: collapsed the two duplicated branches — in the original both
        # ACCEPT and FINISH dispatched to dbapi_interaction_finish.
        # NOTE(review): confirm ACCEPT is not meant to have its own handler.
        return dbapi_interaction_finish(dbsession, input_dict, caller_area=caller_area)
    if action_key == 'MESSAGE':
        return dbapi_interaction_message_add(dbsession, input_dict, caller_area=caller_area)
    # Non-dispatched actions fall through to a plain table action.
    log_process_input('', 'input_dict', input_dict, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)
    action_result = dbsession.table_action(dbmodel.INTERACTION, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result = action_result
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
def dbapi_interaction_message(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Thin wrapper: run `action` against the INTERACTION_MESSAGE table."""
    _api_name = "dbapi_interaction_message"
    _api_entity = 'INTERACTION_MESSAGE'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    for label, value in (('input_dict', input_dict), ('action_filter', action_filter), ('caller_area', caller_area)):
        log_process_input('', label, value, **_process_call_area)
    # Delegate to the generic table action and tag the result with this api.
    api_result = dbsession.table_action(dbmodel.INTERACTION_MESSAGE, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result['api_action'] = _api_action
    api_result['api_name'] = _api_name
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
def dbapi_bank(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Thin wrapper: run `action` against the BANK table."""
    _api_name = "dbapi_bank"
    _api_entity = 'BANK'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    for label, value in (('input_dict', input_dict), ('action_filter', action_filter), ('caller_area', caller_area)):
        log_process_input('', label, value, **_process_call_area)
    # Delegate to the generic table action and tag the result with this api.
    api_result = dbsession.table_action(dbmodel.BANK, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result['api_action'] = _api_action
    api_result['api_name'] = _api_name
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
def dbapi_bank_authorization(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Thin wrapper: run `action` against the BANK_AUTHORIZATION table."""
    _api_name = "dbapi_bank_authorization"
    _api_entity = 'BANK_AUTHORIZATION'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    for label, value in (('input_dict', input_dict), ('action_filter', action_filter), ('caller_area', caller_area)):
        log_process_input('', label, value, **_process_call_area)
    # Delegate to the generic table action and tag the result with this api.
    api_result = dbsession.table_action(dbmodel.BANK_AUTHORIZATION, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result['api_action'] = _api_action
    api_result['api_name'] = _api_name
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_bank_subscription(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Thin wrapper: run `action` against the BANK_SUBSCRIPTION table."""
    _api_name = "dbapi_bank_subscription"
    _api_entity = 'BANK_SUBSCRIPTION'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    for label, value in (('input_dict', input_dict), ('action_filter', action_filter), ('caller_area', caller_area)):
        log_process_input('', label, value, **_process_call_area)
    # Delegate to the generic table action and tag the result with this api.
    api_result = dbsession.table_action(dbmodel.BANK_SUBSCRIPTION, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result['api_action'] = _api_action
    api_result['api_name'] = _api_name
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_bank_account(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Thin wrapper: run `action` against the BANK_ACCOUNT table."""
    _api_name = "dbapi_bank_account"
    _api_entity = 'BANK_ACCOUNT'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    for label, value in (('input_dict', input_dict), ('action_filter', action_filter), ('caller_area', caller_area)):
        log_process_input('', label, value, **_process_call_area)
    # Delegate to the generic table action and tag the result with this api.
    api_result = dbsession.table_action(dbmodel.BANK_ACCOUNT, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result['api_action'] = _api_action
    api_result['api_name'] = _api_name
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_get_bank_account_id(dbsession, any_accountid, caller_area={}):
    """Resolve either a bank_account_id or a bank_accountID to the canonical
    bank_account_id, or return None when no matching row exists."""
    if not any_accountid:
        return None
    _api_name = "dbapi_get_bank_account_id"
    _api_entity = 'BANK_ACCOUNT'
    _api_action = 'get'
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    # Try the internal id first, then the external id.
    account = dbsession.get(dbmodel.BANK_ACCOUNT, {'bank_account_id': any_accountid}, caller_area=_process_call_area)
    if not account:
        account = dbsession.get(dbmodel.BANK_ACCOUNT, {'bank_accountID': any_accountid}, caller_area=_process_call_area)
    return account.bank_account_id if account else None
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_get_bank_code(dbsession, any_bank_id, return_field='bank_id', caller_area={}):
    """Resolve a bank by bank_id, bank_code or bank_BIC and return the field
    selected by `return_field` ('...CODE...' -> bank_code, '...BIC...' ->
    bank_BIC, anything else -> bank_id). Returns None when not found."""
    if not any_bank_id:
        return None
    _api_name = "dbapi_get_bank_code"
    _api_entity = 'BANK'
    _api_action = 'get'
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    # Try each identifier column in turn until a row is found.
    bank = None
    for lookup_key in ('bank_id', 'bank_code', 'bank_BIC'):
        bank = dbsession.get(dbmodel.BANK, {lookup_key: any_bank_id}, caller_area=_process_call_area)
        if bank:
            break
    if not bank:
        return None
    wanted = return_field.upper()
    if 'CODE' in wanted:
        return bank.bank_code
    if 'BIC' in wanted:
        return bank.bank_BIC
    return bank.bank_id
def dbapi_device_log(dbsession, device_uid, application_name, geolocation_lat, geolocation_lon, client_id, caller_area=None):
    """Record a device usage event.

    Refreshes (upserts) the DEVICE, DEVICE_USAGE and CLIENT_DEVICE rows for
    the given device/application/client combination in a single commit, and
    returns an api_result whose 'api_data' is the CLIENT_DEVICE record.

    :param dbsession: database session adapter (get / refresh / commit).
    :param device_uid: unique device identifier.
    :param application_name: application reporting the usage ('?' if empty).
    :param geolocation_lat: last known latitude (stored as given).
    :param geolocation_lon: last known longitude (stored as given).
    :param client_id: client associated with this usage.
    :param caller_area: caller context used to build the process call area.
    """
    # Normalize defaults locally instead of using a shared mutable default arg.
    caller_area = {} if caller_area is None else caller_area
    _api_name = "dbapi_device_log"
    _api_entity = 'DEVICE'
    _api_action = 'log'
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    log_process_input('', 'device_uid', device_uid, **_process_call_area)
    log_process_input('', 'application_name', application_name, **_process_call_area)
    log_process_input('', 'geolocation_lat', geolocation_lat, **_process_call_area)
    log_process_input('', 'geolocation_lon', geolocation_lon, **_process_call_area)
    log_process_input('', 'client_id', client_id, **_process_call_area)
    # FIX: removed a large block of commented-out geolocation conversion
    # experiments (geoloc_to_integer / integer_to_geoloc round-trip prints).
    # NOTE(review): naive UTC timestamp; datetime.utcnow() is deprecated in
    # Python 3.12 — consider datetime.now(timezone.utc) if columns allow it.
    now = datetime.datetime.utcnow()
    application_id = None
    if not application_name:
        application_name = '?'
    application = dbsession.get(dbmodel.APPLICATION, {'application_name': application_name}, caller_area=_process_call_area)
    if application:
        application_id = application.application_id
    device_record = {'device_uid': device_uid, 'last_usage_geolocation_lat': geolocation_lat, 'last_usage_geolocation_lon': geolocation_lon, 'last_usage_timestamp': now}
    usage_record = {'device_uid': device_uid, 'application_name': application_name, 'geolocation_lat': geolocation_lat, 'geolocation_lon': geolocation_lon, 'client_id': client_id}
    client_device_record = {'device_uid': device_uid, 'client_id': client_id, 'application_name': application_name, 'application_id': application_id, 'last_usage_timestamp': now}
    # Upsert all three rows, then commit them atomically.
    device = dbsession.refresh(dbmodel.DEVICE, device_record, auto_commit=False, caller_area=_process_call_area)
    device_usage = dbsession.refresh(dbmodel.DEVICE_USAGE, usage_record, auto_commit=False, caller_area=_process_call_area)
    client_device = dbsession.refresh(dbmodel.CLIENT_DEVICE, client_device_record, auto_commit=False, caller_area=_process_call_area)
    dbsession.commit(**_process_call_area)
    if client_device:
        logged_record = client_device.to_dict()
        # NOTE(review): assumes 'device' is not None whenever 'client_device'
        # is — confirm refresh() cannot fail for DEVICE alone.
        if device.times_used <= 1:
            msg = "OK. new device logged"
        else:
            msg = f"OK. device logged, times_used:{device_usage.times_used}/{client_device.times_used}"
        log_process_message('', 'success', msg, **_process_call_area)
        api_result = {'api_status': 'success', 'api_message': msg, 'api_data': logged_record, 'api_action': _api_action.upper(), 'api_name': _api_name}
    else:
        msg = "device logged FAILED"
        api_result = {'api_status': 'error', 'api_message': msg, 'api_data': {}, 'api_action': _api_action.upper(), 'api_name': _api_name}
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
device logged, times_used:{device_usage.times_used}/{client_device.times_used}" log_process_message('', 'success', msg, **_process_call_area) api_result = {'api_status': 'success', 'api_message': msg, 'api_data': logged_record, 'api_action': _api_action.upper(), 'api_name': _api_name} else: msg = f"device logged FAILED" api_result = {'api_status': 'error', 'api_message': msg, 'api_data': {}, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_token_is_valid(dbsession, token, caller_area={}): _api_name = "dbapi_token_is_valid" _api_entity = 'TOKEN' _api_action = 'validation' _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) log_process_input('', 'token', token,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) if type(token) == type(''): input_dict = {'token': token} elif type(token) == type({}): input_dict = token else: msg='invalid 
token provided' log_process_message('', 'error', msg, **_process_call_area) return False if not input_dict.get('token'): msg='no token provided' log_process_message('', 'error', msg, **_process_call_area) return False token_record = dbsession.get(dbmodel.TOKEN, input_dict, caller_area=_process_call_area) if not token_record: msg = f'access token is NOT valid.(not found)' log_process_message('', 'error', msg, **_process_call_area) return False expiryDT = token_record.expiryDT if not expiryDT: msg = f'access token is NOT valid.(no expiryDT)' log_process_message('', 'error', msg, **_process_call_area) return False #universal time #GMT=Greenwich Mean Time #UTC=Coordinated Universal Time #There is no time difference between Coordinated Universal Time and Greenwich Mean Time #nowString = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') #now=datetime.datetime.utcnow() if expiryDT < datetime.datetime.utcnow(): msg = f'access token is NOT valid.(expired)' api_result = False api_result = True log_process_result(_api_msgID, api_result, data_name='access_token_is_valid', **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_token_get_access_token(dbsession, token_request, caller_area={}): _api_name = "dbapi_token_get_access_token" _api_entity = 'TOKEN' _api_action = 'GET_TOKEN' _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) 
_process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) log_process_input('', 'token_request', token_request,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) application_name=token_request.get('application_name') client_id=token_request.get('application_client_id') client_secretKey=token_request.get('application_client_secretKey') application = dbsession.get(dbmodel.APPLICATION, {'application_name': application_name, 'client_id': client_id}, caller_area=_process_call_area) if not application: msg='application not registered' api_result={'api_status': 'error', 'api_message': msg,'api_data':{}} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result if not application.client_id == client_id or not application.client_secretKey == client_secretKey: msg='application credentials not valid' api_result={'api_status': 'error', 'api_message': msg,'api_data':{}} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result token_duration_secs = 3600 #1 hour if token_request.get('token_scope') == 'application_service': token_duration_secs = 3600 #1 hour token_request.update({'duration_seconds':token_duration_secs}) token_request.update({'status':'Active'}) expiryDT = datetime.datetime.utcnow() + datetime.timedelta(seconds=token_duration_secs) token_request.update({'expiryDT': expiryDT}) if 'token' in token_request.keys(): token_request.pop('token') token = dbsession.insert(dbmodel.TOKEN, token_request,auto_commit=True, caller_area=_process_call_area) if not token: msg='token generation failed' api_result={'api_status': 'system error', 'api_message': msg,'api_data':{}} 
log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result token_record = { 'token_type': token.token_type, 'token_scope': token.token_scope, 'grant_type': token.grant_type, 'token': token.token, 'duration_seconds': token.duration_seconds, 'expiryDT': token.expiryDT, } msg='OK. token generated' api_result={'api_status': 'success', 'api_message': msg,'api_data':token_record} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_cleanup_tokens(dbsession, caller_area={}): _api_name = "debapi_cleanup_tokens" _api_entity = 'TOKEN' _api_action = 'CLEANUP' _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) where_expression = {'status': 'Expired'} deleted_result = dbsession.delete_rows(dbmodel.TOKEN, where_expression, auto_commit=True) deleted_rows = deleted_result.get('rows_deleted', 0) #nowString = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') #where_expression = f"expiryDT<'{datetime.datetime.utcnow()}'" 
where_expression = {'expiryDT': {datetime.datetime.utcnow()}} update_dict = {'status': 'Expired'} expired_result = dbsession.update_rows(dbmodel.TOKEN, update_dict,where_expression, auto_commit=True, caller_area=_process_call_area) expired_rows = expired_result.get('rows_updated', 0) msg = f'tokens cleaned with {expired_rows} tokens expired, {deleted_rows} removed.' api_result = {'api_status': 'success', 'api_message': msg, 'rows_expired': expired_rows, 'rows_removed': deleted_rows} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_interaction_start(dbsession, input_dict, caller_area={}): _api_name = "dbapi_interaction_start" _api_entity = 'INTERACTION' _api_action = 'START' _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) 
log_process_start(_api_msgID,**_process_call_area) log_process_input('', 'input_dict', input_dict,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) #//////////////////////////////////////// originator = None originator_id = None originator_name = None corresponder = None corresponder_id = None corresponder_name = None #//////////////////////////////////////// #step-1: originator (originator, originator_id, originator_name) = find_originator(dbsession, input_dict, _process_call_area) # originator_id = input_dict.get('originator_id') # if originator_id: # xid = dbsession.get(dbmodel.CLIENT, {'client_id':originator_id}, caller_area=_process_call_area) # if xid: # originator='client' # originator_name = xid.email # else: # xid = dbsession.get(dbmodel.POINT_OF_SALE, {'pointofsale_id':originator_id}, caller_area=_process_call_area) # if xid: # originator='pointofsale' # originator_name = xid.name # else: # msg = f'originator not valid' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # xoriginator = input_dict.get('originator') # if xoriginator and not xoriginator == originator: # msg = f'originator_id not valid for originator {xoriginator}' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # else: # client_id = input_dict.get('client_id') # if client_id: # client=dbsession.get(dbmodel.CLIENT, {'client_id':client_id}, caller_area=_process_call_area) # if not client: # msg = f'client not found' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # originator='client' # 
originator_id=client_id # originator_name = client.email # else: # pointofsale_id = input_dict.get('pointofsale_id') # if pointofsale_id: # pointofsale=dbsession.get(dbmodel.POINT_OF_SALE, {'pointofsale_id':pointofsale_id}, caller_area=_process_call_area) # if not pointofsale: # msg = f'pointofsale not found' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # originator='pointofsale' # originator_id=pointofsale_id # originator_name = pointofsale.name # else: # msg = f'no pointofsale or consumer or client defined' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result if not originator_id: msg = f'originator not defined (pointofsale or client or service_point)' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result xoriginator = input_dict.get('originator') if xoriginator and not xoriginator == originator: msg = f'originator_id not valid for originator {xoriginator}' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result input_dict.update({'originator': originator}) input_dict.update({'originator_id': originator_id}) input_dict.update({'originator_name': originator_name}) msg = f'originator set to [{originator_name}]' log_process_message('', 'success', msg, **_process_call_area) #step-2: corresponder (corresponder, corresponder_id, corresponder_name) = find_corresponder(dbsession, input_dict, _process_call_area) # corresponder_id = input_dict.get('corresponder_id') # if not 
corresponder_id: # msg = f'no corresponder specified' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # pointofsale = dbsession.get(dbmodel.POINT_OF_SALE, {'pointofsale_id':corresponder_id}, caller_area=_process_call_area) # if pointofsale: # corresponder='pointofsale' # corresponder_id = pointofsale.pointofsale_id # corresponder_name = pointofsale.name # else: # xid = dbsession.get(dbmodel.CLIENT, {'client_id':corresponder_id}, caller_area=_process_call_area) # if xid: # corresponder='client' # corresponder_id=xid.client_id # corresponder_name = xid.email # else: # msg = f'corresponder not valid' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result if not corresponder_id: msg = f'corresponder not valid' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result xcorresponder = input_dict.get('corresponder') if xcorresponder and not xcorresponder == corresponder: msg = f'corresponder_id not valid for corresponder {xcorresponder}' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result input_dict.update({'corresponder': corresponder}) input_dict.update({'corresponder_id': corresponder_id}) input_dict.update({'corresponder_name': corresponder_name}) msg = f'corresponder set to [{corresponder_name}]' log_process_message('', 'success', msg, **_process_call_area) #step-3: already active filterJson = {"originator": originator, "originator_id": originator_id, "status": 'Active'} 
active_interactions=dbsession.get_rows(dbmodel.INTERACTION, filterJson, caller_area=_process_call_area) if active_interactions: msg = f'[{len(active_interactions)} active interactions found] for originator [{originator_name}]' log_process_message('', 'warning', msg, **_process_call_area) for active_interaction in active_interactions: interaction_id = active_interaction.interaction_id time_start = active_interaction.row_timestamp time_end = datetime.datetime.utcnow() diff = time_end - time_start duration = diff.days * 24 * 60 * 60 + diff.seconds interaction_rec = active_interaction.to_dict() interaction_rec.update({'status':'canceled','completed_timestamp':time_end,'duration':duration}) active_interaction = dbsession.update(dbmodel.INTERACTION, interaction_rec, auto_commit=True, caller_area=_process_call_area) #step-4: corresponder is available (not active) filterJson = {"corresponder": corresponder, "corresponder_id": corresponder_id, "status": 'Active'} active_interactions=dbsession.get_rows(dbmodel.INTERACTION, filterJson, caller_area=_process_call_area) if active_interactions: msg = f'[{len(active_interactions)} active interaction(s) found] for corresponder [{corresponder_name}]' log_process_message('', 'warning', msg, **_process_call_area) for active_interaction in active_interactions: interaction_id = active_interaction.interaction_id time_start = active_interaction.row_timestamp time_end = datetime.datetime.utcnow() diff = time_end - time_start duration = diff.days * 24 * 60 * 60 + diff.seconds if duration>5*60: # 5 minutes interaction_rec = active_interaction.to_dict() interaction_rec.update({'status':'canceled-timeout','completed_timestamp':time_end,'duration':duration}) active_interaction = dbsession.update(dbmodel.INTERACTION, interaction_rec, auto_commit=True, caller_area=_process_call_area) msg = f'corresponder {corresponder_name} interaction {interaction_id} timed-out and canceled after {duration/60} minutes' log_process_message('', 'warning', msg, 
**_process_call_area) filterJson = {"corresponder": corresponder, "corresponder_id": corresponder_id, "status": 'Active'} active_interactions=dbsession.get_rows(dbmodel.INTERACTION, filterJson, caller_area=_process_call_area) if active_interactions: msg = f'corresponder {corresponder_name} is not available' log_process_message('', 'error', msg, **_process_call_area) api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #step-5: insert input_dict.update({'status': 'Requested'}) interaction = dbsession.insert(dbmodel.INTERACTION, input_dict, auto_commit=True, caller_area=_process_call_area) if not interaction: msg = f'interaction start failed' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result interaction_id = interaction.interaction_id #step-6: message interaction_message = { 'interaction_id':interaction_id, 'originator_id':interaction.originator_id, 'originator': interaction.originator, 'originator_name': interaction.originator_name, 'message_type':'start', 'message_record':f"hi. 
i am {interaction.originator} {interaction.originator_name} and i want to interact with {interaction.corresponder} {interaction.corresponder_name}", 'content_type':'text', 'format':'', 'application_name': input_dict.get('application_name'), 'geolocation_lat': input_dict.get('geolocation_lat'), 'geolocation_lon': input_dict.get('geolocation_lon'), } start_message = dbsession.insert(dbmodel.INTERACTION_MESSAGE, interaction_message, auto_commit=True, caller_area=_process_call_area) if not start_message: msg = f'start message insert failed' log_process_message('', 'error', msg, **_process_call_area) msg = f'interaction start failed (message insert failed)' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #shalimar #step-6: result interaction_record = interaction.to_dict() msg=f'OK. interaction established between You and {corresponder.upper()} {corresponder_name}' api_result = {'api_status': 'success', 'api_message': msg, 'interaction_id': interaction_id, 'api_data': interaction_record, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_interaction_accept(dbsession, input_dict, caller_area={}): _api_name = "dbapi_interaction_accept" _api_entity = 'INTERACTION' _api_action = 'ACCEPT' _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, 
**_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) log_process_input('', 'input_dict', input_dict,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) interaction_id = input_dict.get('interaction_id') if not interaction_id: msg = f'interaction not defined' log_process_message('', 'error', msg, **_process_call_area) api_result = {'api_status': 'error', 'api_message': msg} log_process_finish(_api_msgID, api_result, **_process_call_area) return interaction = dbsession.get(dbmodel.INTERACTION, {'interaction_id':interaction_id}, caller_area=_process_call_area) if not interaction: msg = f'interaction not found' api_result = {'api_status': 'error', 'api_message': msg} log_process_finish(_api_msgID, api_result, **_process_call_area) return if interaction.status == 'Active': msg = f'interaction is already Active' api_result = {'api_status': 'error', 'api_message': msg} log_process_finish(_api_msgID, api_result, **_process_call_area) return if not interaction.status=='Requested': msg = f'interaction is already [{interaction.status}]' api_result = {'api_status': 'error', 'api_message': msg} log_process_finish(_api_msgID, api_result, **_process_call_area) return #//////////////////////////////////////// originator = None originator_id = None originator_name = None #//////////////////////////////////////// # #step-1: originator # originator_id = input_dict.get('originator_id') # if originator_id: # xid = dbsession.get(dbmodel.CLIENT, {'client_id':originator_id}, 
caller_area=_process_call_area) # if xid: # originator='client' # originator_name = xid.email # else: # xid = dbsession.get(dbmodel.POINT_OF_SALE, {'pointofsale_id':originator_id}, caller_area=_process_call_area) # if xid: # originator='pointofsale' # originator_name = xid.name # else: # msg = f'originator not valid' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # xoriginator = input_dict.get('originator') # if xoriginator and not xoriginator == originator: # msg = f'originator_id not valid for originator {xoriginator}' # log_process_message('', 'error', msg, **_process_call_area) # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # else: # client_id = input_dict.get('client_id') # if client_id: # client=dbsession.get(dbmodel.CLIENT, {'client_id':client_id}, caller_area=_process_call_area) # if not client: # msg = f'client not found' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # originator='client' # originator_id=client_id # originator_name = client.email # else: # pointofsale_id = input_dict.get('pointofsale_id') # if pointofsale_id: # pointofsale=dbsession.get(dbmodel.POINT_OF_SALE, {'pointofsale_id':pointofsale_id}, caller_area=_process_call_area) # if not pointofsale: # msg = f'pointofsale not found' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # originator='pointofsale' # originator_id=pointofsale_id # originator_name = pointofsale.name # 
else: # msg = f'no pointofsale or client defined' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result (originator, originator_id, originator_name) = find_originator(dbsession, input_dict, _process_call_area) if not originator_id: msg = f'originator not defined (pointofsale or client or service_point)' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result xoriginator = input_dict.get('originator') if xoriginator and not xoriginator == originator: msg = f'originator_id not valid for originator {xoriginator}' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result # if not originator or not originator_id: # msg = f'originator(as corresponder) not defined (pointofsale or client)' # log_process_message('', 'error', msg, **_process_call_area) # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result if originator_id == interaction.originator_id: msg = f'accepter [{originator_name}] same as requestor' log_process_message('', 'error', msg, **_process_call_area) api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result if interaction.corresponder_id: if not originator_id == interaction.corresponder_id: msg = f'interaction must be accepted by [{interaction.corresponder}] {interaction.corresponder_name} [not] by [{originator}] {originator_name} ' api_result = 
{'api_status': 'error', 'api_message': msg} log_process_finish(_api_msgID, api_result, **_process_call_area) return input_dict.update({'corresponder': originator}) input_dict.update({'corresponder_id': originator_id}) input_dict.update({'corresponder_name': originator_name}) msg=f'corresponder: [{originator}] [[{originator_name}]]' log_process_message('', 'success', msg, **_process_call_area) interaction_message = { 'interaction_id': interaction_id, 'originator_id': originator_id, 'originator': originator, 'originator_name': originator_name, 'message_type':'accept', 'message_record':f"hi. i am {originator} {originator_name}. how can i help you Mr. {interaction.originator} {interaction.originator_name}", 'content_type':input_dict.get('content_type','text'), 'format':input_dict.get('format',''), 'application_name': input_dict.get('application_name'), 'geolocation_lat': input_dict.get('geolocation_lat'), 'geolocation_lon': input_dict.get('geolocation_lon'), } message = dbsession.insert(dbmodel.INTERACTION_MESSAGE, interaction_message, auto_commit=True, caller_area=_process_call_area) if not message: msg = f'interaction message add failed' log_process_message('', 'error', msg, **_process_call_area) time_start = interaction.row_timestamp time_end = datetime.datetime.utcnow() diff = time_end - time_start duration = diff.days * 24 * 60 * 60 + diff.seconds interaction_rec = interaction.to_dict() interaction_rec.update({ 'corresponder': originator, 'corresponder_id': originator_id, 'corresponder_name': originator_name, 'status': 'Active', 'last_usage_timestamp': datetime.datetime.utcnow(), 'accept_geolocation_lat':input_dict.get('geolocation_lat'), 'accept_geolocation_lon':input_dict.get('geolocation_lon'), }) interaction = dbsession.update(dbmodel.INTERACTION, interaction_rec, auto_commit=True, caller_area=_process_call_area) if not interaction: msg = f'interaction accept failed' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 
'api_name': _api_name} log_process_message('', 'error', msg, **_process_call_area) return api_result #step-6: result interaction_rec = interaction.to_dict() msg=f'OK. interaction accepted' api_result = {'api_status': 'success', 'api_message': msg, 'interaction_id': interaction.interaction_id, 'api_data': interaction_rec, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_interaction_finish(dbsession, input_dict, caller_area={}): _api_name = "dbapi_interaction_finish" _api_entity = 'INTERACTION' _api_action = 'FINISH' _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) log_process_input('', 'input_dict', input_dict,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) interaction_id = input_dict.get('interaction_id') if not interaction_id: msg = f'interaction not defined' log_process_message('', 'error', msg, **_process_call_area) api_result 
= {'api_status': 'error', 'api_message': msg} log_process_finish(_api_msgID, api_result, **_process_call_area) return interaction = dbsession.get(dbmodel.INTERACTION, {'interaction_id':interaction_id}, caller_area=_process_call_area) if not interaction: msg = f'interaction not found' api_result = {'api_status': 'error', 'api_message': msg} log_process_finish(_api_msgID, api_result, **_process_call_area) return if not interaction.status=='Active': msg = f'interaction not Active' api_result = {'api_status': 'error', 'api_message': msg} log_process_finish(_api_msgID, api_result, **_process_call_area) return #//////////////////////////////////////// originator = None originator_id = None originator_name = None #//////////////////////////////////////// #step-1: originator # originator_id = input_dict.get('originator_id') # if originator_id: # xid = dbsession.get(dbmodel.CLIENT, {'client_id':originator_id}, caller_area=_process_call_area) # if xid: # originator='client' # originator_name = xid.email # else: # xid = dbsession.get(dbmodel.POINT_OF_SALE, {'pointofsale_id':originator_id}, caller_area=_process_call_area) # if xid: # originator='pointofsale' # originator_name = xid.name # else: # msg = f'originator not valid' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # xoriginator = input_dict.get('originator') # if xoriginator and not xoriginator == originator: # msg = f'originator_id not valid for originator {xoriginator}' # log_process_message('', 'error', msg, **_process_call_area) # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # else: # client_id = input_dict.get('client_id') # if client_id: # client=dbsession.get(dbmodel.CLIENT, {'client_id':client_id}, 
caller_area=_process_call_area) # if not client: # msg = f'client not found' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # originator='client' # originator_id=client_id # originator_name = client.email # else: # pointofsale_id = input_dict.get('pointofsale_id') # if pointofsale_id: # pointofsale=dbsession.get(dbmodel.POINT_OF_SALE, {'pointofsale_id':pointofsale_id}, caller_area=_process_call_area) # if not pointofsale: # msg = f'pointofsale not found' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # originator='pointofsale' # originator_id=pointofsale_id # originator_name = pointofsale.name # else: # msg = f'no pointofsale or client defined' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result (originator, originator_id, originator_name) = find_originator(dbsession, input_dict, _process_call_area) if not originator_id: msg = f'originator not defined (pointofsale or client or service_point)' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result xoriginator = input_dict.get('originator') if xoriginator and not xoriginator == originator: msg = f'originator_id not valid for originator {xoriginator}' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result # if not originator or not originator_id: # msg = f'originator not defined 
(pointofsale or client)' # log_process_message('', 'error', msg, **_process_call_area) # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result if not (originator_id == interaction.originator_id or originator_id == interaction.corresponder_id): msg = f'invalid originator [{originator_name}] for interaction [{interaction.interaction_id}]' log_process_message('', 'error', msg, **_process_call_area) api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result input_dict.update({'originator': originator}) input_dict.update({'originator_id': originator_id}) input_dict.update({'originator_name': originator_name}) msg=f'originator: [{originator}] [[{originator_name}]]' log_process_message('', 'success', msg, **_process_call_area) interaction_message = { 'interaction_id': interaction_id, 'originator_id': input_dict.get('originator_id', ''), 'originator': input_dict.get('originator', ''), 'originator_name': input_dict.get('originator_name', ''), 'content_type': input_dict.get('content_type', 'text'), 'format': input_dict.get('format', ''), 'application_name': input_dict.get('application_name'), 'geolocation_lat': input_dict.get('geolocation_lat'), 'geolocation_lon': input_dict.get('geolocation_lon'), 'message_type': 'finish', 'message_record':f"goodbye. 
Thank you for interacting with us.{input_dict.get("originator")} {input_dict.get("originator_name")}", } message = dbsession.insert(dbmodel.INTERACTION_MESSAGE, interaction_message, auto_commit=True, caller_area=_process_call_area) if not message: msg = f'interaction message add failed' log_process_message('', 'error', msg, **_process_call_area) time_start = interaction.row_timestamp time_end = datetime.datetime.utcnow() diff = time_end - time_start duration = diff.days * 24 * 60 * 60 + diff.seconds interaction_rec = interaction.to_dict() interaction_rec.update({'status':'completed','completed_timestamp':time_end,'duration':duration}) interaction = dbsession.update(dbmodel.INTERACTION, interaction_rec, auto_commit=True, caller_area=_process_call_area) if not interaction: msg = f'interaction finish failed' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_message('', 'error', msg, **_process_call_area) return api_result #step-6: result interaction_rec = interaction.to_dict() msg=f'OK. 
interaction finish' api_result = {'api_status': 'success', 'api_message': msg, 'interaction_id': interaction.interaction_id, 'api_data': interaction_rec, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_interaction_message_add(dbsession, input_dict, caller_area={}): _api_name = "dbapi_interaction_message_add" _api_entity = 'INTERACTION_MESSAGE' _api_action = 'ADD' _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) log_process_input('', 'input_dict', input_dict,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) interaction_id = input_dict.get('interaction_id') if not interaction_id: msg = f'interaction not defined' log_process_message('', 'error', msg, **_process_call_area) api_result = {'api_status': 'error', 'api_message': msg} log_process_finish(_api_msgID, api_result, **_process_call_area) return interaction = 
dbsession.get(dbmodel.INTERACTION, {'interaction_id':interaction_id}, caller_area=_process_call_area) if not interaction: msg = f'interaction not found' api_result = {'api_status': 'error', 'api_message': msg} log_process_finish(_api_msgID, api_result, **_process_call_area) return if not interaction.status=='Active': msg = f'interaction not Active' api_result = {'api_status': 'error', 'api_message': msg} log_process_finish(_api_msgID, api_result, **_process_call_area) return #//////////////////////////////////////// originator = None originator_id = None originator_name = None #//////////////////////////////////////// #step-1: originator # originator_id = input_dict.get('originator_id') # if originator_id: # xid = dbsession.get(dbmodel.CLIENT, {'client_id':originator_id}, caller_area=_process_call_area) # if xid: # originator='client' # originator_name = xid.email # else: # xid = dbsession.get(dbmodel.POINT_OF_SALE, {'pointofsale_id':originator_id}, caller_area=_process_call_area) # if xid: # originator='pointofsale' # originator_name = xid.name # else: # msg = f'originator not valid' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # xoriginator = input_dict.get('originator') # if xoriginator and not xoriginator == originator: # msg = f'originator_id not valid for originator {xoriginator}' # log_process_message('', 'error', msg, **_process_call_area) # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # else: # client_id = input_dict.get('client_id') # if client_id: # client=dbsession.get(dbmodel.CLIENT, {'client_id':client_id}, caller_area=_process_call_area) # if not client: # msg = f'client not found' # api_result = {'api_status': 'error', 'api_message': msg, 
'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # originator='client' # originator_id=client_id # originator_name = client.email # else: # pointofsale_id = input_dict.get('pointofsale_id') # if pointofsale_id: # pointofsale=dbsession.get(dbmodel.POINT_OF_SALE, {'pointofsale_id':pointofsale_id}, caller_area=_process_call_area) # if not pointofsale: # msg = f'pointofsale not found' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # originator='pointofsale' # originator_id=pointofsale_id # originator_name = pointofsale.name # else: # msg = f'no pointofsale or client defined' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # if not originator or not originator_id: # msg = f'originator not defined (pointofsale or client)' # log_process_message('', 'error', msg, **_process_call_area) # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result (originator, originator_id, originator_name) = find_originator(dbsession, input_dict, _process_call_area) if not originator_id: msg = f'originator not defined (pointofsale or client or service_point)' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result xoriginator = input_dict.get('originator') if xoriginator and not xoriginator == originator: msg = f'originator_id not valid for originator {xoriginator}' api_result = {'api_status': 'error', 
'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result input_dict.update({'originator': originator}) input_dict.update({'originator_id': originator_id}) input_dict.update({'originator_name': originator_name}) if not (originator_id == interaction.originator_id or originator_id == interaction.corresponder_id): msg = f'invalid originator [{originator_name}] for interaction [{interaction.interaction_id}]' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result msg=f'originator: [{originator}] [[{originator_name}]]' log_process_message('', 'success', msg, **_process_call_area) interaction_message = { 'interaction_id': interaction_id, 'originator_id': originator_id, 'originator': originator, 'originator_name': originator_name, 'message_type': input_dict.get('message_type', 'message'), 'message_record': input_dict.get('message_record', ''), 'content_type': input_dict.get('content_type', 'text'), 'format': input_dict.get('format', ''), 'application_name': input_dict.get('application_name'), 'geolocation_lat': input_dict.get('geolocation_lat'), 'geolocation_lon': input_dict.get('geolocation_lon'), } message = dbsession.insert(dbmodel.INTERACTION_MESSAGE, interaction_message, auto_commit=True, caller_area=_process_call_area) if not message: msg = f'interaction message add failed' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #step-6: result message_record = message.to_dict() msg=f'OK. 
interaction message added' api_result = {'api_status': 'success', 'api_message': msg, 'interaction_message_id': message.interaction_message_id, 'api_data': message_record, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def find_originator(dbsession,input_dict={},caller_area={}): originator = None originator_id = None originator_name = None originator_id = input_dict.get('originator_id') if originator_id: client = dbsession.get(dbmodel.CLIENT, {'client_id':originator_id}, caller_area=caller_area) if client: originator='client' originator_name = client.email else: pointofsale = dbsession.get(dbmodel.POINT_OF_SALE, {'pointofsale_id': originator_id}, caller_area=caller_area) if pointofsale: originator='pointofsale' originator_name = pointofsale.name else: service_point=dbsession.get(dbmodel.SERVICE_POINT, {'servicepoint_id':originator_id}, caller_area=caller_area) if service_point: originator='service_point' originator_id=service_point.service_point_id originator_name = service_point.name else: client_id = input_dict.get('client_id') if client_id: client=dbsession.get(dbmodel.CLIENT, {'client_id':client_id}, caller_area=caller_area) if client: originator='client' originator_id=client_id originator_name = client.email else: pointofsale_id = input_dict.get('pointofsale_id') if pointofsale_id: pointofsale=dbsession.get(dbmodel.POINT_OF_SALE, {'pointofsale_id':pointofsale_id}, caller_area=caller_area) if pointofsale: originator='pointofsale' originator_id=pointofsale_id originator_name = pointofsale.name else: servicepoint_id = input_dict.get('servicepoint_id') if servicepoint_id: service_point=dbsession.get(dbmodel.SERVICE_POINT, {'servicepoint_id':pointofsale_id}, caller_area=caller_area) if service_point: originator='service_point' 
originator_id=service_point.service_point_id originator_name = service_point.name return (originator, originator_id, originator_name) #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def find_corresponder(dbsession,input_dict={},caller_area={}): corresponder = None corresponder_id = None corresponder_name = None corresponder_id = input_dict.get('corresponder_id') if corresponder_id: client = dbsession.get(dbmodel.CLIENT, {'client_id':corresponder_id}, caller_area=caller_area) if client: corresponder='client' corresponder_name = client.email else: pointofsale = dbsession.get(dbmodel.POINT_OF_SALE, {'pointofsale_id': corresponder_id}, caller_area=caller_area) if pointofsale: corresponder='pointofsale' corresponder_name = pointofsale.name else: service_point=dbsession.get(dbmodel.SERVICE_POINT, {'servicepoint_id':corresponder_id}, caller_area=caller_area) if service_point: corresponder='service_point' corresponder_id=service_point.service_point_id corresponder_name = service_point.name return (corresponder, corresponder_id, corresponder_name) #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def set_msgID(api_name,api_action,api_entity): msgid=f"#C0#api #C9#{api_name}#C0# [{api_entity}]#C0# action [[{api_action.upper()}]]#C0#" return msgid #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def geoloc_to_integer(geoloc): try: d = decimal.Decimal(str(geoloc).replace(",", ".").strip()) except: d = 0 i = int(d * 1000000000) return i #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def integer_to_geoloc(i): 
try: d = decimal.Decimal(str(i)) except: d = 0 geoloc = d / 1000000000 return geoloc #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: # module initialization #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: module_configuration = retrieve_module_configuration(__file__, module_identityDictionary, module_configuration, print_enabled=consolelog_enabled, filelog_enabled=filelog_enabled, handle_as_init=False) #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: # (print_enabled, filelog_enabled, log_file, errors_file,consolelog_enabled)=get_globals_from_configuration(module_configuration) #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: functions_ids=['dbapi_'] exclude_functions_ids = ['set_msgID', 'set_process_debug_level'] thisModuleObj = sys.modules[__name__] module_configuration.update({'database_apis':[]}) module_configuration = add_apis_to_configuration('database_apis', module_configuration, thisModuleObj, functions_ids, exclude_functions_ids) save_module_configuration(module_identityDictionary, module_configuration, print_enabled=consolelog_enabled, filelog_enabled=filelog_enabled) thisApp.pair_module_configuration('database_apis',module_configuration) #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: 
save_module_configuration(module_identityDictionary, module_configuration, print_enabled=consolelog_enabled, filelog_enabled=filelog_enabled) #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: if get_module_debug_level(module_id) > 0: apis = thisApp.application_configuration.get('database_apis', {}) for api_name in apis.keys(): api_entry = apis.get(api_name) msg=f'module [[{module_id}]] database api [{api_name} [[[{api_entry}]]]' log_message(msg) #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #import commands # apis = thisApp.application_configuration.get('database_apis', {}) # for api_name in apis.keys(): # api_entry = apis.get(api_name) # msg=f'from {module_id} import {api_name}' # log_message(msg) #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: msg = f'database [ganimides] [[[[module [{module_id}] loaded]]]] with [[version {module_version}]]' if thisApp.get_module_debug_level(module_id): log_message(msg) #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: # main #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: if __name__ == '__main__': #tests/research print(__file__) # caller_area={'aaaa': '11111'} # 
print('0caller_area=', caller_area) # test_api(caller_area, call_level=-1) # print('4caller_area=', caller_area) #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# -*- coding: utf-8 -*- #https://www.pythoncentral.io/series/python-sqlalchemy-database-tutorial/ import os import sys if not (os.path.dirname(os.path.dirname(__file__)) in sys.path): sys.path.append(os.path.dirname(os.path.dirname(__file__))) if not (os.path.dirname(__file__) in sys.path): sys.path.append(os.path.dirname(__file__)) import datetime import decimal # # from _serverApp import thisApp from _serverApp import get_debug_option_as_level, get_debug_files, log_message, retrieve_module_configuration, get_globals_from_configuration, save_module_configuration,get_module_debug_level from _serverApp import log_process_start, log_process_finish, log_process_message, log_process_result,log_process_data, log_process_input, log_process_output from _serverApp import set_process_identity_dict, set_process_caller_area, add_apis_to_configuration from _serverApp import build_process_signature, build_process_call_area, get_debug_level, get_debug_files import _database_ganimides_model as dbmodel #import ganimides_openBankingAPI as bankingapi #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #::: module ::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: module_Function = 'database adapter' module_ProgramName = 'database api' module_BaseTimeStamp = datetime.datetime.now() module_folder = os.getcwd() 
module_color = thisApp.Fore.LIGHTMAGENTA_EX module_folder = os.path.dirname(__file__) module_ProgramName = os.path.splitext(os.path.basename(__file__))[0] module_id = f'{module_ProgramName}' module_eyecatch = module_ProgramName module_version = 0.1 module_log_file_name = module_ProgramName+'.log' module_errors_file_name = os.path.splitext(os.path.basename(module_log_file_name))[0]+'_errors.log' module_versionString = f'{module_id} version {module_version}' module_file = __file__ log_file=thisApp.log_file_name print_enabled = thisApp.CONSOLE_ON and thisApp.DEBUG_ON consolelog_enabled = thisApp.CONSOLE_ON and thisApp.DEBUG_ON filelog_enabled = thisApp.FILELOG_ON module_is_externally_configurable = True module_identityDictionary = { 'module_file':__file__, 'module_Function':module_Function, 'module_ProgramName':module_ProgramName, 'module_BaseTimeStamp':module_BaseTimeStamp, 'module_folder':module_folder, 'module_color':module_color, 'module_id':module_id, 'module_eyecatch':module_eyecatch, 'module_version':module_version, 'module_versionString':module_versionString, 'module_log_file_name':module_log_file_name, 'module_errors_file_name': module_errors_file_name, 'consolelog_enabled': consolelog_enabled, 'filelog_enabled': filelog_enabled, 'module_is_externally_configurable':module_is_externally_configurable, } #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: # configuration #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: 
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: module_configuration = { } #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: # api services : database apis #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_device(dbsession, action, input_dict, action_filter={}, caller_area={}): _api_name = "dbapi_device" _api_entity = 'DEVICE' _api_action = action _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, 
def dbapi_device_register_unregister(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Register or unregister a DEVICE for a client's application(s).

    *action* must be 'REGISTER' or 'UNREGISTER' (case-insensitive).  When
    input_dict names all applications ('*' / 'ALL' under 'applications',
    'application' or 'application_name'), every CLIENT_DEVICE row for the
    device/client pair is flipped; otherwise only the row for the single
    named application is refreshed.

    Returns the standard api_result dict ('api_status', 'api_message',
    'api_data', 'api_action', 'api_name').
    NOTE(review): action_filter/caller_area are mutable defaults; they are only
    read here, but callers should not rely on sharing them.
    """
    _api_name = "dbapi_device_register_unregister"
    _api_entity = 'DEVICE'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard per-call logging/debug scaffolding shared by every dbapi_* entry point.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs,
                                                 **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    log_process_input('', 'input_dict', input_dict, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)

    actions_supported = ('REGISTER', 'UNREGISTER')
    now = datetime.datetime.utcnow()
    if action.upper() not in actions_supported:
        msg = f"action '{action}' not supported. {actions_supported}"
        api_result = {'api_status': 'error', 'api_message': msg, 'api_data': actions_supported, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result

    device = dbsession.get(dbmodel.DEVICE, input_dict, caller_area=_process_call_area)
    if not device:
        # BUGFIX: original called device.valid_fields_dictionary() on None
        # (AttributeError).  Echo the caller's input back instead.
        msg = f"invalid device"
        api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    client = dbsession.get(dbmodel.CLIENT, input_dict, caller_area=_process_call_area)
    if not client:
        # BUGFIX: same None-dereference as above for the client lookup.
        msg = f"invalid client"
        api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    client_id = client.client_id

    # BUGFIX: original used `action.upper() in ('REGISTER')` -- ('REGISTER') is
    # a plain string, so that was a substring test.  Compare directly.
    if action.upper() == 'REGISTER':
        status = 'Registered'
        xx = 'for'
    else:
        status = 'UnRegistered'
        xx = 'from'

    registered_apps = []
    # Any of the three aliases may request "all applications"; `or ''` guards
    # against a None stored under the key (original assumed str).
    if (input_dict.get('applications') or '').upper() in ('*', 'ALL') \
            or (input_dict.get('application') or '').upper() in ('*', 'ALL') \
            or (input_dict.get('application_name') or '').upper() in ('*', 'ALL'):
        CLIENT_DEVICE = dbmodel.CLIENT_DEVICE
        # Only rows whose status actually needs to change.
        client_devices = dbsession.query(CLIENT_DEVICE).filter(CLIENT_DEVICE.device_uid == device.device_uid,
                                                               CLIENT_DEVICE.client_id == client_id,
                                                               CLIENT_DEVICE.status != status).all()
        if len(client_devices) <= 0:
            # Nothing to flip: report the current rows as already done.
            client_devices = dbsession.query(CLIENT_DEVICE).filter(CLIENT_DEVICE.device_uid == device.device_uid,
                                                                   CLIENT_DEVICE.client_id == client_id).all()
            client_device_records = dbsession.rows_to_dict(CLIENT_DEVICE, client_devices)
            msg = f"device already {status.upper()} {xx} usage by all applications"
            api_result = {'api_status': 'success', 'api_message': msg, 'api_data': client_device_records, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        for client_device in client_devices:
            client_device.status = status
            application = dbsession.get(dbmodel.APPLICATION, {'application_name': client_device.application_name}, caller_area=_process_call_area)
            registered_apps.append(application.application_name)
        dbsession.commit(**_process_call_area)
        client_device_records = dbsession.rows_to_dict(CLIENT_DEVICE, client_devices)
    else:
        application = dbsession.get(dbmodel.APPLICATION, input_dict, caller_area=_process_call_area)
        if not application:
            # BUGFIX: None-dereference as above for the application lookup.
            msg = f"invalid application"
            api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        client_device_record = {'device_uid': device.device_uid, 'client_id': client_id,
                                'application_name': application.application_name,
                                'last_usage_timestamp': now, 'status': status}
        client_device = dbsession.get(dbmodel.CLIENT_DEVICE, client_device_record, caller_area=_process_call_area)
        if client_device:
            if client_device.status == status:
                msg = f"device already {client_device.status.upper()} {xx} usage by application '{client_device.application_name}'"
                client_device_records = [client_device.to_dict()]
                api_result = {'api_status': 'success', 'api_message': msg, 'api_data': client_device_records, 'api_action': _api_action.upper(), 'api_name': _api_name}
                log_process_finish(_api_msgID, api_result, **_process_call_area)
                return api_result
        client_device = dbsession.refresh(dbmodel.CLIENT_DEVICE, client_device_record, auto_commit=True, caller_area=_process_call_area)
        registered_apps.append(application.application_name)
        client_device_records = [client_device.to_dict()]

    row_count = len(client_device_records)
    x = 's' if row_count > 1 else ''
    msg = f"device {status.upper()} {xx} usage by application{x} {registered_apps}"
    api_result = {'api_status': 'success', 'api_message': msg, 'api_data': client_device_records,
                  'api_data_rows': row_count, 'api_action': _api_action.upper(), 'api_name': _api_name}
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
client_device_records = [client_device.to_dict()] api_result = {'api_status': 'success', 'api_message': msg, 'api_data': client_device_records, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result client_device = dbsession.refresh(dbmodel.CLIENT_DEVICE, client_device_record, auto_commit=True, caller_area=_process_call_area) registered_apps.append(application.application_name) client_device_records = [client_device.to_dict()] row_count = len(client_device_records) x='' if row_count > 1: x = 's' msg = f"device {status.upper()} {xx} usage by application{x} {registered_apps}" api_result = {'api_status': 'success', 'api_message': msg, 'api_data': client_device_records, 'api_data_rows': row_count, 'api_action': _api_action.upper(), 'api_name':_api_name } log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_device_usage(dbsession, action, input_dict, action_filter={}, caller_area={}): _api_name = "dbapi_device_usage" _api_entity = 'DEVICE' _api_action = action _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, 
def dbapi_client(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """dbapi entry point for the CLIENT table.

    Special actions 'SEND-CONFIRMATION-EMAIL' / 'SEND-CONFIRMATION-SMS'
    (underscores accepted) upsert a CLIENT_CONFIRMATION row marked 'Sent'.
    Every other action is delegated to dbsession.table_action; after a
    successful UPDATE/REFRESH/REGISTER/ACTIVATE/DEACTIVATE/CONFIRM the
    client's denormalized fields are propagated to the MERCHANT /
    SUBSCRIPTION / CUSTOMER_SERVICE_ASSISTANT and APPLICATION_USER tables.

    Returns the standard api_result dict.
    """
    # BUGFIX: _api_name was misspelled "dbapi_cient", which leaked into every
    # api_result['api_name'].
    _api_name = "dbapi_client"
    _api_entity = 'CLIENT'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard per-call logging/debug scaffolding.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs,
                                                 **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    log_process_input('', 'input_dict', input_dict, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)

    if action.upper().replace('_', '-') in ('SEND-CONFIRMATION-EMAIL', 'SEND-CONFIRMATION-SMS'):
        client = dbsession.get(dbmodel.CLIENT, action_filter, caller_area=_process_call_area)
        if not client:
            msg = f'client not found'
            api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        client_dict = client.to_dict()
        if client.confirmed and client.status == 'Active':
            msg = f'client {client.email} already confirmed'
            api_result = {'api_status': 'success', 'api_message': msg, 'api_data': client_dict}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        # BUGFIX (both branches below): original used `in ('SEND-...')` which
        # is a substring test on a plain string; compare with == instead.
        if action.upper().replace('_', '-') == 'SEND-CONFIRMATION-EMAIL':
            if not client.email:
                msg = f'email is missing'
                api_result = {'api_status': 'error', 'api_message': msg, 'api_data': client_dict}
                log_process_finish(_api_msgID, api_result, **_process_call_area)
                return api_result
            # ok = send_confirmation_email(client.email)  # delivery not wired up yet
            ok = True
            if ok:
                confirm_filter = {'email': client.email, 'mobile': ''}
                confirmation = dbsession.get(dbmodel.CLIENT_CONFIRMATION, confirm_filter, caller_area=_process_call_area)
                if not confirmation:
                    confirm_dict = {'email': client.email, 'mobile': '', 'status': 'Sent'}
                    xapi_result = dbsession.table_action(dbmodel.CLIENT_CONFIRMATION, 'refresh', confirm_dict, {}, auto_commit=True, caller_area=_process_call_area)
                # BUGFIX: original fell through to `confirmation.status` even
                # when confirmation was None (AttributeError); use elif.
                elif not confirmation.status == 'Confirmed':
                    confirm_dict = {'email': client.email, 'mobile': '', 'status': 'Sent', 'send_timestamp': datetime.datetime.utcnow()}
                    xapi_result = dbsession.table_action(dbmodel.CLIENT_CONFIRMATION, 'refresh', confirm_dict, {}, auto_commit=True, caller_area=_process_call_area)
                msg = f'OK. a confirmation email sent to {client.email}'
                api_result = {'api_status': 'success', 'api_message': msg, 'api_data': client_dict}
                api_result.update({'api_action': _api_action, 'api_name': _api_name})
                log_process_finish(_api_msgID, api_result, **_process_call_area)
                return api_result
            else:
                msg = f'FAILED to send confirmation email to {client.email}. retry'
                api_result = {'api_status': 'error', 'api_message': msg, 'api_data': client_dict}
                api_result.update({'api_action': _api_action, 'api_name': _api_name})
                log_process_finish(_api_msgID, api_result, **_process_call_area)
                return api_result
        if action.upper().replace('_', '-') == 'SEND-CONFIRMATION-SMS':
            if not client.mobile:
                msg = f'mobile has not been defined'
                api_result = {'api_status': 'error', 'api_message': msg, 'api_data': client_dict}
                log_process_finish(_api_msgID, api_result, **_process_call_area)
                return api_result
            # ok = send_confirmation_sms(client.mobile)  # delivery not wired up yet
            ok = True
            if ok:
                confirm_filter = {'mobile': client.mobile, 'email': ''}
                confirmation = dbsession.get(dbmodel.CLIENT_CONFIRMATION, confirm_filter, caller_area=_process_call_area)
                if not confirmation:
                    confirm_dict = {'mobile': client.mobile, 'email': '', 'status': 'Sent'}
                    xapi_result = dbsession.table_action(dbmodel.CLIENT_CONFIRMATION, 'refresh', confirm_dict, {}, auto_commit=True, caller_area=_process_call_area)
                # BUGFIX: same None-dereference as the email branch.
                elif not confirmation.status == 'Confirmed':
                    confirm_dict = {'mobile': client.mobile, 'email': '', 'status': 'Sent', 'send_timestamp': datetime.datetime.utcnow()}
                    xapi_result = dbsession.table_action(dbmodel.CLIENT_CONFIRMATION, 'refresh', confirm_dict, {}, auto_commit=True, caller_area=_process_call_area)
                msg = f'OK. a confirmation sms sent to {client.mobile}'
                api_result = {'api_status': 'success', 'api_message': msg, 'api_data': client_dict}
                api_result.update({'api_action': _api_action, 'api_name': _api_name})
                log_process_finish(_api_msgID, api_result, **_process_call_area)
                return api_result
            else:
                msg = f'FAILED to send confirmation sms to {client.mobile}. retry'
                api_result = {'api_status': 'error', 'api_message': msg, 'api_data': client_dict}
                api_result.update({'api_action': _api_action, 'api_name': _api_name})
                log_process_finish(_api_msgID, api_result, **_process_call_area)
                return api_result

    api_result = dbsession.table_action(dbmodel.CLIENT, action, input_dict, action_filter,
                                        auto_commit=True, caller_area=_process_call_area)
    if not api_result.get('api_status') == 'success':
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    if action.upper() in ('UPDATE', 'REFRESH', 'REGISTER', 'ACTIVATE', 'DEACTIVATE', 'CONFIRM'):
        # Propagate the client's denormalized fields to the role tables.
        client_dict = api_result.get('api_data', {})
        client_id = client_dict.get('client_id')
        client_type = client_dict.get('client_type')
        update_dict = {
            'status': client_dict.get('status'),
            'email': client_dict.get('email'),
            'confirmed': client_dict.get('confirmed'),
            'client_status': client_dict.get('status'),
            'client_email': client_dict.get('email'),
            'client_mobile': client_dict.get('mobile'),
            'client_name': client_dict.get('name'),
            'client_confirmed': client_dict.get('confirmed'),
            'confirmed_timestamp': client_dict.get('confirmed_timestamp'),
        }
        xaction = 'update_rows'
        action_filter = {'client_id': client_id}
        if client_id and client_type:
            if client_type == 'merchant':
                xapi_result = dbsession.table_action(dbmodel.MERCHANT, xaction, update_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
            elif client_type == 'subscriber':
                xapi_result = dbsession.table_action(dbmodel.SUBSCRIPTION, xaction, update_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
            elif client_type == 'customer_service_assistant':
                xapi_result = dbsession.table_action(dbmodel.CUSTOMER_SERVICE_ASSISTANT, xaction, update_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
            # APPLICATION_USER carries the fields for every client type.
            xapi_result = dbsession.table_action(dbmodel.APPLICATION_USER, xaction, update_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
'email': email, 'status': 'Confirmed', 'confirmed_timestamp': datetime.datetime.utcnow(),'confirmed':1} # xapi_result = dbsession.table_action(dbmodel.CLIENT_CONFIRMATION, 'refresh' , confirm_dict, {}, auto_commit=True, caller_area=_process_call_area) # else: # # client_dict = api_result.get('api_data', {}) # email=client_dict.get('email') # confirm_filter = {'mobile': '', 'email': email} # confirmation = dbsession.get(dbmodel.CLIENT_CONFIRMATION, confirm_filter, caller_area=_process_call_area) # if not confirmation: # confirm_dict = {'mobile': '', 'email': email, 'status': 'Confirmed', 'confirmed_timestamp': datetime.datetime.utcnow(),'confirmed':1} # xapi_result = dbsession.table_action(dbmodel.CLIENT_CONFIRMATION, 'refresh' , confirm_dict, {}, auto_commit=True, caller_area=_process_call_area) # if not confirmation.status=='Confirmed': # confirm_dict = {'mobile': '', 'email': email, 'status': 'Confirmed', 'confirmed_timestamp': datetime.datetime.utcnow(),'confirmed':1} # xapi_result = dbsession.table_action(dbmodel.CLIENT_CONFIRMATION, 'refresh' , confirm_dict, {}, auto_commit=True, caller_area=_process_call_area) api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_client_device(dbsession, action, input_dict, action_filter={}, caller_area={}): _api_name = "dbapi_client_device" _api_entity = 'DEVICE' _api_action = action _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, 
**_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) if action.upper in ('REGISTER','UNREGISTER'): return dbapi_device_register_unregister(dbsession, action, input_dict, action_filter, caller_area=_process_call_area) log_process_input('', 'input_dict', input_dict,**_process_call_area) log_process_input('', 'action_filter', action_filter,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) action_result = dbsession.table_action(dbmodel.CLIENT_DEVICE, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area) api_result = action_result api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_verification(dbsession, action, input_dict, action_filter={}, caller_area={}): _api_name = "dbapi_verification" _api_entity = 'VERIFICATION' _api_action = action _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = 
def dbapi_email_confirmation(dbsession, input_dict, action_filter={}, caller_area={}):
    """Confirm a client's email address via its VERIFICATION record.

    Looks up the VERIFICATION row matching input_dict; marks it 'Confirmed'
    if it is not already, then sets email_confirmed/confirmed on the owning
    CLIENT row.  Returns the standard api_result dict; idempotent when the
    email is already confirmed.
    """
    _api_name = "dbapi_email_confirmation"
    _api_entity = 'email'
    _api_action = 'confirm'
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard per-call logging/debug scaffolding.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs,
                                                 **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    log_process_input('', 'input_dict', input_dict, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)

    verification = dbsession.get(dbmodel.VERIFICATION, input_dict, caller_area=_process_call_area)
    if not verification:
        msg = f'email verification failed'
        api_result = {'api_status': 'error', 'api_message': msg}
        api_result.update({'api_action': _api_action, 'api_name': _api_name})
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    if not verification.status == 'Confirmed':
        # First confirmation: flip the VERIFICATION row.
        action_filter = {'verification_id': verification.verification_id}
        update_dict = {'status': 'Confirmed', 'verified': 1, 'verification_timestamp': datetime.datetime.utcnow()}
        action_result = dbsession.table_action(dbmodel.VERIFICATION, 'update', update_dict, action_filter,
                                               auto_commit=True, caller_area=_process_call_area)
        api_result = action_result
        api_result.update({'api_action': _api_action, 'api_name': _api_name})
    client_id = verification.client_id
    client = dbsession.get(dbmodel.CLIENT, {'client_id': client_id}, caller_area=_process_call_area)
    if not client:
        # BUGFIX: the original message said 'mobile verification failed' in
        # this email-confirmation path (copy-paste from the sms twin).
        msg = f'email verification failed (client_id not found)'
        api_result = {'api_status': 'error', 'api_message': msg}
        api_result.update({'api_action': _api_action, 'api_name': _api_name})
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    if not client.email_confirmed or not client.confirmed:
        update_record = {'client_id': client_id, 'email_confirmed': 1,
                         'email_confirmed_timestamp': datetime.datetime.utcnow(), 'confirmed': 1}
        dbreply = dbsession.table_action(dbmodel.CLIENT, 'update', update_record, {'client_id': client_id},
                                         auto_commit=True, caller_area=_process_call_area)
        client_rec = client.to_dict()
        if not dbreply.get('api_status') == 'success':
            msg = f'email verification failed (client update failed)'
            api_result = {'api_status': 'error', 'api_message': msg, 'api_data': client_rec}
            api_result.update({'api_action': _api_action, 'api_name': _api_name})
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        else:
            msg = f'OK. client email confirmed'
            api_result = {'api_status': 'success', 'api_message': msg, 'api_data': client_rec}
            api_result.update({'api_action': _api_action, 'api_name': _api_name})
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
    else:
        client_rec = client.to_dict()
        msg = f'OK. client email already confirmed'
        api_result = {'api_status': 'success', 'api_message': msg, 'api_data': client_rec}
        api_result.update({'api_action': _api_action, 'api_name': _api_name})
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
def dbapi_mobile_confirmation(dbsession, input_dict, action_filter={}, caller_area={}):
    """Confirm a client's mobile number via its VERIFICATION record.

    Looks up the VERIFICATION row matching input_dict; marks it 'Confirmed'
    if it is not already, then sets mobile_confirmed/confirmed on the owning
    CLIENT row.  Returns the standard api_result dict; idempotent when the
    mobile is already confirmed.
    NOTE(review): action_filter/caller_area are mutable defaults; action_filter
    is rebound (not mutated) below.
    """
    _api_name = "dbapi_mobile_confirmation"
    _api_entity = 'mobile'
    _api_action = 'confirm'
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard per-call logging/debug scaffolding shared by every dbapi_* entry point.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs,
                                                 **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID,**_process_call_area)
    log_process_input('', 'input_dict', input_dict,**_process_call_area)
    log_process_input('', 'action_filter', action_filter,**_process_call_area)
    log_process_input('', 'caller_area', caller_area,**_process_call_area)
    verification = dbsession.get(dbmodel.VERIFICATION, input_dict, caller_area=_process_call_area)
    if not verification:
        msg = f'mobile verification failed'
        api_result = {'api_status': 'error', 'api_message': msg}
        api_result.update({'api_action': _api_action, 'api_name': _api_name})
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    if not verification.status=='Confirmed':
        # First confirmation: flip the VERIFICATION row to Confirmed.
        action_filter = {'verification_id': verification.verification_id}
        update_dict = {'status': 'Confirmed', 'verified': 1, 'verification_timestamp': datetime.datetime.utcnow()}
        action_result = dbsession.table_action(dbmodel.VERIFICATION,'update', update_dict, action_filter,
                                               auto_commit=True, caller_area=_process_call_area)
        api_result = action_result
        api_result.update({'api_action': _api_action, 'api_name': _api_name})
    client_id = verification.client_id
    client=dbsession.get(dbmodel.CLIENT, {'client_id':client_id}, caller_area=_process_call_area)
    if not client:
        msg = f'mobile verification failed (client_id not found)'
        api_result = {'api_status': 'error', 'api_message': msg}
        api_result.update({'api_action': _api_action, 'api_name': _api_name})
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    if not client.mobile_confirmed or not client.confirmed:
        # Propagate the confirmation onto the CLIENT row.
        update_record = {'client_id': client_id, 'mobile_confirmed': 1,
                         'mobile_confirmed_timestamp': datetime.datetime.utcnow(), 'confirmed': 1}
        dbreply = dbsession.table_action(dbmodel.CLIENT, 'update', update_record, {'client_id': client_id},
                                         auto_commit=True, caller_area=_process_call_area)
        client_rec=client.to_dict()
        if not dbreply.get('api_status')=='success':
            msg = f'mobile verification failed (client update failed)'
            api_result = {'api_status': 'error', 'api_message': msg, 'api_data': client_rec}
            api_result.update({'api_action': _api_action, 'api_name': _api_name})
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        else:
            msg = f'OK. client mobile confirmed'
            api_result = {'api_status': 'success', 'api_message': msg,'api_data':client_rec}
            api_result.update({'api_action': _api_action, 'api_name': _api_name})
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
    else:
        # Already confirmed on the CLIENT row: succeed without writing.
        client_rec=client.to_dict()
        msg = f'OK. client mobile already confirmed'
        api_result = {'api_status': 'success', 'api_message': msg,'api_data':client_rec}
        api_result.update({'api_action': _api_action, 'api_name': _api_name})
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
'confirmed': 1} dbreply = dbsession.table_action(dbmodel.CLIENT, 'update', update_record, {'client_id': client_id}, auto_commit=True, caller_area=_process_call_area) client_rec=client.to_dict() if not dbreply.get('api_status')=='success': msg = f'mobile verification failed (client update failed)' api_result = {'api_status': 'error', 'api_message': msg, 'api_data': client_rec} api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result else: msg = f'OK. client mobile confirmed' api_result = {'api_status': 'success', 'api_message': msg,'api_data':client_rec} api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result else: client_rec=client.to_dict() msg = f'OK. client mobile already confirmed' api_result = {'api_status': 'success', 'api_message': msg,'api_data':client_rec} api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_api(dbsession, action, input_dict, action_filter={}, caller_area={}): _api_name = "dbapi_api" _api_entity = 'API' _api_action = action _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) 
def dbapi_api_register_unregister(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Register (attach) or unregister (detach) an API for an APPLICATION.

    REGISTER: validates that both the API and the APPLICATION exist and are
    'Active', then refreshes the APPLICATION_API link row with status
    'Active'.  UNREGISTER: resolves the link row and updates its status to
    'Unregistered'.  Returns the standard api_result dict.
    NOTE(review): this function mutates the caller's input_dict in place
    (input_dict.update below) -- callers appear to rely on the enriched dict.
    """
    _api_name = "dbapi_api_register_unregister"
    _api_entity = 'APPLICATION API'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard per-call logging/debug scaffolding shared by every dbapi_* entry point.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs,
                                                 **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID,**_process_call_area)
    if action.upper() not in ('REGISTER','UNREGISTER'):
        msg = f'invalid action [[{action}]] requested. use REGISTER or UNREGISTER'
        api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    log_process_input('', 'input_dict', input_dict,**_process_call_area)
    log_process_input('', 'action_filter', action_filter,**_process_call_area)
    log_process_input('', 'caller_area', caller_area,**_process_call_area)
    if _api_action.upper() == 'REGISTER':
        # REGISTER requires both sides to exist and be Active.
        api=dbsession.get(dbmodel.API, input_dict, caller_area=_process_call_area)
        if not api:
            msg = f'api not found'
            api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        if not api.status=='Active':
            msg = f'api not Active'
            api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        application=dbsession.get(dbmodel.APPLICATION, input_dict, caller_area=_process_call_area)
        if not application:
            msg = f'application not found'
            api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        if not application.status=='Active':
            msg = f'application not Active'
            api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        # Enrich input_dict with resolved ids so the link-row refresh matches.
        input_dict.update({'api_id':api.api_id})
        input_dict.update({'api_name':api.api_name})
        input_dict.update({'application_id': application.application_id})
        input_dict.update({'application_name': application.application_name})
        input_dict.update({'subscription_id': application.subscription_id})
        action_filter={}
        api_registered = dbsession.get(dbmodel.APPLICATION_API, input_dict, caller_area=_process_call_area)
        if api_registered:
            # Existing link row: target it explicitly so 'REFRESH' updates it.
            input_dict.update({'application_api_id': api_registered.application_api_id})
            action_filter = {'application_api_id': api_registered.application_api_id}
        input_dict.update({'status': 'Active'})
        action='REFRESH'
        action_result = dbsession.table_action(dbmodel.APPLICATION_API, action , input_dict, action_filter,
                                               auto_commit=True, caller_area=_process_call_area)
        api_result = action_result
        api_result.update({'api_action': _api_action, 'api_name': _api_name})
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    elif _api_action.upper() == 'UNREGISTER':
        # UNREGISTER tolerates a missing API/APPLICATION: the ids are only
        # used (when available) to narrow the link-row lookup.
        api=dbsession.get(dbmodel.API, input_dict, caller_area=_process_call_area)
        if api:
            input_dict.update({'api_id':api.api_id})
            input_dict.update({'api_name':api.api_name})
        application=dbsession.get(dbmodel.APPLICATION, input_dict, caller_area=_process_call_area)
        if application:
            input_dict.update({'application_id': application.application_id})
            input_dict.update({'application_name': application.application_name})
        api_registered = dbsession.get(dbmodel.APPLICATION_API, input_dict, caller_area=_process_call_area)
        if not api_registered:
            msg = f'record not found'
            api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        input_dict.update({'application_api_id': api_registered.application_api_id})
        input_dict.update({'status':'Unregistered'})
        action_filter={'application_api_id': api_registered.application_api_id}
        action='UPDATE'
        action_result = dbsession.table_action(dbmodel.APPLICATION_API, action, input_dict, action_filter,
                                               auto_commit=True, caller_area=_process_call_area)
        api_result = action_result
        api_result.update({'api_action': _api_action, 'api_name': _api_name})
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    else:
        # Unreachable in practice (guarded above); kept as a defensive fallback.
        msg = f'invalid action [[{action}]] requested. use REGISTER or UNREGISTER'
        api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
dbsession.table_action(dbmodel.APPLICATION_API, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area) api_result = action_result api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result else: msg = f'invalid action [[{action}]] requested. use REGISTER or UNREGISTER' api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_application(dbsession, action, input_dict, action_filter={}, caller_area={}): _api_name = "dbapi_application" _api_entity = 'APPLICATION' _api_action = action _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) if action.upper() in ('API_REGISTER', 'API_UNREGISTER'): xaction=action.upper().replace('API_','') return dbapi_api_register_unregister(dbsession, xaction, input_dict, action_filter, 
caller_area=_process_call_area) log_process_input('', 'input_dict', input_dict,**_process_call_area) log_process_input('', 'action_filter', action_filter,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) if action.upper() == 'VALIDATE' or action.upper() == 'VALIDATE_CREDENTIALS': application_name=input_dict.get('application_name') if not application_name: application_name=action_filter.get('application_name') client_id=input_dict.get('client_id') if not client_id: client_id=input_dict.get('application_client_id') if not client_id: client_id=action_filter.get('client_id') if not client_id: client_id=action_filter.get('application_client_id') client_secretKey = input_dict.get('client_secretKey') if not client_secretKey: client_secretKey=input_dict.get('application_client_secretKey') if not client_secretKey: client_secretKey=action_filter.get('client_secretKey') if not client_secretKey: client_secretKey=action_filter.get('application_client_secretKey') return dbapi_application_credentials_are_valid(dbsession, application_name, client_id, client_secretKey) if action.upper() in ('ADD','INSERT','REGISTER','REFRESH'): if not input_dict.get('application_name'): msg = f'application name not defined' api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result if not input_dict.get('subscription_id') and not input_dict.get('client_id'): msg = f'subscription not defined' api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result subscription = dbsession.get(dbmodel.SUBSCRIPTION, input_dict, caller_area=_process_call_area) if not subscription: client=dbsession.get(dbmodel.CLIENT, input_dict, caller_area=_process_call_area) if client: input_dict.update({'client_id': client.client_id}) subscription = 
dbsession.get(dbmodel.SUBSCRIPTION, input_dict, caller_area=_process_call_area) if not subscription: msg = f'subscription not found' api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result if not subscription.status=='Active': msg = f'subscription not Active' api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result app_rec={'application_name':input_dict.get('application_name')} application = dbsession.get(dbmodel.APPLICATION, app_rec, caller_area=_process_call_area) if application: if not application.subscription_id == subscription.subscription_id: msg = f'application {application.application_name} already in used. try another name' api_result = {'api_status': 'error', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result subscription_record = subscription.to_dict() input_dict.update(subscription_record) client=dbsession.get(dbmodel.CLIENT, input_dict, caller_area=_process_call_area) if not client: msg = f'client not found' api_result = {'api_status': 'systemerror', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result if not client.status=='Active': msg = f'client not Active' api_result = {'api_status': 'system error', 'api_message': msg, 'api_data': input_dict} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result action_result = dbsession.table_action(dbmodel.APPLICATION, action , input_dict, action_filter, auto_commit=True, caller_area=_process_call_area) api_result = action_result api_result.update({'api_action': _api_action, 'api_name': _api_name}) if api_result.get('api_status') == 'success': user_dict=api_result.get('api_data') user_dict.update({'user_role':'owner'}) 
dbapi_application_USER(dbsession, 'register', user_dict,caller_area=_process_call_area) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_application_credentials_are_valid(dbsession, application_name, client_id, client_secretKey ,caller_area={}): _api_name = "dbapi_application_credentials_are_valid" _api_entity = 'APPLICATION' _api_action = 'validation' _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) log_process_input('', 'application_name', application_name,**_process_call_area) log_process_input('', 'client_id', client_id,**_process_call_area) log_process_input('', 'client_secretKey', client_secretKey,**_process_call_area) application=dbsession.get(dbmodel.APPLICATION, {'application_name': application_name}, caller_area=_process_call_area) if not application: api_result=False else: if not application.client_id == client_id or not application.client_secretKey == client_secretKey: api_result=False else: api_result=True 
log_process_result(_api_msgID, api_result, data_name='application_credentials_are_valid', **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_application_api(dbsession, action, input_dict, action_filter={}, caller_area={}): _api_name = "dbapi_application_api" _api_entity = 'APPLICATION_API' _api_action = action _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) log_process_input('', 'input_dict', input_dict,**_process_call_area) log_process_input('', 'action_filter', action_filter,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) action_result = dbsession.table_action(dbmodel.APPLICATION_API, action , input_dict, action_filter, auto_commit=True, caller_area=_process_call_area) api_result = action_result api_result.update({'api_action': _api_action, 'api_name': _api_name}) log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result 
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def _process_setup(dbsession, api_name, api_action, api_entity, caller_area):
    """Build the (msgID, call_area) pair shared by every dbapi_* entry point.

    Factors out the ten-line preamble previously repeated verbatim in each
    API function below; the construction sequence is unchanged.
    """
    msgID = set_msgID(api_name, api_action, api_entity)
    identity_kwargs = {'type': 'api', 'module': module_id, 'name': api_name,
                       'action': api_action, 'entity': api_entity, 'msgID': msgID}
    adapters_kwargs = {'dbsession': dbsession}
    log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    debug_level = get_debug_level(caller_area.get('debug_level'),
                                  **identity_kwargs, **adapters_kwargs)
    debug_files = get_debug_files(debug_level, **identity_kwargs, **adapters_kwargs)
    debug_kwargs = {'debug_level': debug_level, 'debug_files': debug_files}
    signature = build_process_signature(**identity_kwargs, **adapters_kwargs,
                                        **debug_kwargs, **log_kwargs)
    return msgID, build_process_call_area(signature, caller_area)


#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_application_USER(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Manage APPLICATION_USER links (which users belong to an application).

    For REGISTER/UNREGISTER/ADD/REFRESH a user_role is required, the owning
    client is looked up (or refreshed from input_dict when no client_id is
    given) and the application must exist and be Active; both records are
    merged into input_dict before the table action runs.
    Returns the table-action result dict enriched with api_action/api_name.
    """
    _api_name = "dbapi_application_user"
    _api_entity = 'APPLICATION_USER'
    _api_action = action
    _api_msgID, _process_call_area = _process_setup(dbsession, _api_name, _api_action, _api_entity, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    log_process_input('', 'input_dict', input_dict, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)

    def _error(msg):
        # Uniform error exit used by the validation steps below.
        log_process_message('', 'error', msg, **_process_call_area)
        result = {'api_status': 'error', 'api_message': msg,
                  'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, result, **_process_call_area)
        return result

    if action.upper() in ('REGISTER', 'UNREGISTER', 'ADD', 'REFRESH'):
        if not input_dict.get('user_role'):
            return _error('user role not defined')
        client_id = input_dict.get('client_id')
        if client_id:
            client = dbsession.get(dbmodel.CLIENT, {'client_id': client_id}, caller_area=_process_call_area)
        else:
            # No id supplied: refresh (create-or-update) the client from input_dict.
            client = dbsession.refresh(dbmodel.CLIENT, input_dict, auto_commit=True,
                                       caller_area=_process_call_area)
        if not client:
            return _error('client not found')
        if not client.status == 'Active':
            # Deliberately only a warning: an inactive client does not block the action.
            msg = f"client not Active.(status:{client.status})"
            log_process_message('', 'warning', msg, **_process_call_area)
        application = dbsession.get(dbmodel.APPLICATION,
                                    {'application_name': input_dict.get('application_name')},
                                    caller_area=_process_call_area)
        if not application:
            application = dbsession.get(dbmodel.APPLICATION,
                                        {'application_id': input_dict.get('application_id')},
                                        caller_area=_process_call_area)
        if not application:
            return _error('application not found')
        if not application.status == 'Active':
            return _error(f"application not Active.(status:{application.status})")
        input_dict.update(application.to_dict())
        input_dict.update(client.to_dict())

    action_result = dbsession.table_action(dbmodel.APPLICATION_USER, action, input_dict, action_filter,
                                           auto_commit=True, caller_area=_process_call_area)
    api_result = action_result
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result


#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_application_template(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Thin pass-through: run `action` against the APPLICATION_TEMPLATE table.

    NOTE(review): _api_name/_api_entity say "dbapi_template"/'TEMPLATE' while
    the table is APPLICATION_TEMPLATE — preserved to keep log identities stable.
    """
    _api_name = "dbapi_template"
    _api_entity = 'TEMPLATE'
    _api_action = action
    _api_msgID, _process_call_area = _process_setup(dbsession, _api_name, _api_action, _api_entity, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    log_process_input('', 'input_dict', input_dict, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)

    api_result = dbsession.table_action(dbmodel.APPLICATION_TEMPLATE, action, input_dict, action_filter,
                                        auto_commit=True, caller_area=_process_call_area)
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result


#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_token(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Thin pass-through: run `action` against the TOKEN table."""
    _api_name = "dbapi_token"
    _api_entity = 'TOKEN'
    _api_action = action
    _api_msgID, _process_call_area = _process_setup(dbsession, _api_name, _api_action, _api_entity, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    log_process_input('', 'input_dict', input_dict, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)

    api_result = dbsession.table_action(dbmodel.TOKEN, action, input_dict, action_filter,
                                        auto_commit=True, caller_area=_process_call_area)
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result


#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_subscription(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Manage SUBSCRIPTION records together with their backing CLIENT record.

    REGISTER/ADD/REFRESH first REFRESH the client (client_type 'subscriber')
    and copy the issued client_id/client_secretKey into input_dict.
    CONFIRM/ACTIVATE/DEACTIVATE/DELETE apply the action to the client first
    and mirror the resulting client status onto the subscription. Finally the
    action runs against the SUBSCRIPTION table.
    """
    _api_name = "dbapi_subscription"
    _api_entity = 'SUBSCRIPTION'
    _api_action = action
    _api_msgID, _process_call_area = _process_setup(dbsession, _api_name, _api_action, _api_entity, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    log_process_input('', 'input_dict', input_dict, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)

    input_dict.update({'client_type': 'subscriber'})
    # FIX: thismsg was previously unbound for actions outside the two branches
    # below, raising NameError at the final message rewrite; it also crashed
    # (None.replace) when the client action carried no 'api_message'.
    thismsg = ''
    if action.upper() in ('REGISTER', 'ADD', 'REFRESH'):
        user = dbsession.get(dbmodel.USER, input_dict, caller_area=_process_call_area)
        input_dict.update({'user_id': user.user_id if user else ''})
        action_result = dbsession.table_action(dbmodel.CLIENT, 'REFRESH', input_dict, action_filter,
                                               auto_commit=True, caller_area=_process_call_area)
        api_result = action_result
        thismsg = action_result.get('api_message')
        api_result.update({'api_action': _api_action, 'api_name': _api_name})
        if not api_result.get('api_status') == 'success':
            msg = "subscription not registered. client record create failed"
            log_process_message(_api_msgID, 'error', msg, **_process_call_area)
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        client = api_result.get('api_data')
        input_dict.update({'client_id': client.get('client_id')})
        input_dict.update({'client_secretKey': client.get('client_secretKey')})
    elif action.upper() in ('CONFIRM', 'ACTIVATE', 'DEACTIVATE', 'DELETE'):
        subscription_dict = dbsession.get(dbmodel.SUBSCRIPTION, input_dict, 'DICT',
                                          caller_area=_process_call_area)
        if not subscription_dict:
            api_result = {'api_status': 'error', 'api_message': 'subscription not found',
                          'api_data': input_dict, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        client = dbsession.get(dbmodel.CLIENT, subscription_dict, '', caller_area=_process_call_area)
        if not client:
            api_result = {'api_status': 'error', 'api_message': 'client not found',
                          'api_data': subscription_dict, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        client_id = client.client_id
        input_dict.update({'client_id': client_id})
        input_dict.update({'client_secretKey': client.client_secretKey})
        api_result = dbsession.table_action(dbmodel.CLIENT, action, input_dict, action_filter,
                                            auto_commit=True, caller_area=_process_call_area)
        api_result.update({'api_action': _api_action, 'api_name': _api_name})
        thismsg = api_result.get('api_message')
        if not api_result.get('api_status') == 'success':
            msg = f'action {action.upper()} on client {client_id} failed'
            log_process_message(_api_msgID, 'error', msg, **_process_call_area)
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        # Mirror the client's new status onto the subscription action input.
        client_status = api_result.get('api_data', {}).get('status')
        subscription_dict = dbsession.get(dbmodel.SUBSCRIPTION, subscription_dict, 'DICT',
                                          caller_area=_process_call_area)
        input_dict.update({'status': client_status})
        input_dict.update({'client_id': subscription_dict.get('client_id')})

    action_result = dbsession.table_action(dbmodel.SUBSCRIPTION, action, input_dict, action_filter,
                                           auto_commit=True, caller_area=_process_call_area)
    api_result = action_result
    if thismsg:
        # Reuse the client-level message, rebadged for the SUBSCRIPTION entity.
        api_result.update({'api_message': thismsg.replace('CLIENT', _api_entity)})
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result


#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_user(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Thin pass-through: run `action` against the USER table.

    On failure the raw table-action result is returned without the
    api_action/api_name enrichment (behavior preserved from the original).
    """
    _api_name = "dbapi_user"
    _api_entity = 'USER'
    _api_action = action
    _api_msgID, _process_call_area = _process_setup(dbsession, _api_name, _api_action, _api_entity, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    log_process_input('', 'input_dict', input_dict, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)

    api_result = dbsession.table_action(dbmodel.USER, action, input_dict, action_filter,
                                        auto_commit=True, caller_area=_process_call_area)
    if not api_result.get('api_status') == 'success':
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result


#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_merchant(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Manage MERCHANT records together with their backing CLIENT record.

    Bank-account actions are dispatched to dedicated helpers.
    REGISTER/ADD/REFRESH first REFRESH the client (client_type 'merchant');
    CONFIRM/ACTIVATE/DEACTIVATE/DELETE apply the action to the client first
    and mirror the merchant's status/client_id back into input_dict. The
    action then runs against the MERCHANT table.
    """
    _api_name = "dbapi_merchant"
    _api_entity = 'MERCHANT'
    _api_action = action
    _api_msgID, _process_call_area = _process_setup(dbsession, _api_name, _api_action, _api_entity, caller_area)
    log_process_start(_api_msgID, **_process_call_area)

    act = action.upper()
    if ('BANKACCOUNT' in act and 'GET' in act) or act in ('BANKACCOUNTS', 'BANKACCOUNT'):
        return dbapi_merchant_get_bankaccounts(dbsession, input_dict, action_filter,
                                               caller_area=_process_call_area)
    elif 'BANKACCOUNT' in act and 'REGISTER' in act:
        return dbapi_merchant_bankaccount_register(dbsession, input_dict, action_filter,
                                                   caller_area=_process_call_area)

    log_process_input('', 'input_dict', input_dict, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)

    input_dict.update({'client_type': 'merchant'})
    if act in ('REGISTER', 'ADD', 'REFRESH'):
        # All three map to a client REFRESH (create-or-update).
        action = 'REFRESH'
        action_result = dbsession.table_action(dbmodel.CLIENT, action, input_dict, action_filter,
                                               auto_commit=True, caller_area=_process_call_area)
        api_result = action_result
        api_result.update({'api_action': _api_action, 'api_name': _api_name})
        if not api_result.get('api_status') == 'success':
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        client = api_result.get('api_data')
        input_dict.update({'client_id': client.get('client_id')})
    elif act in ('CONFIRM', 'ACTIVATE', 'DEACTIVATE', 'DELETE'):
        merchant_dict = dbsession.get(dbmodel.MERCHANT, input_dict, 'DICT', caller_area=_process_call_area)
        if not merchant_dict:
            api_result = {'api_status': 'error', 'api_message': 'merchant not found',
                          'api_data': input_dict, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        client_dict = dbsession.get(dbmodel.CLIENT, merchant_dict, 'DICT', caller_area=_process_call_area)
        if not client_dict:
            api_result = {'api_status': 'error', 'api_message': 'client not found',
                          'api_data': merchant_dict, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        action_result = dbsession.table_action(dbmodel.CLIENT, action, input_dict, action_filter,
                                               auto_commit=True, caller_area=_process_call_area)
        api_result = action_result
        api_result.update({'api_action': _api_action, 'api_name': _api_name})
        if not api_result.get('api_status') == 'success':
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        # Re-read the merchant and mirror its (possibly updated) status.
        merchant_dict = dbsession.get(dbmodel.MERCHANT, merchant_dict, 'DICT',
                                      caller_area=_process_call_area)
        input_dict.update({'status': merchant_dict.get('status')})
        input_dict.update({'client_id': merchant_dict.get('client_id')})

    action_result = dbsession.table_action(dbmodel.MERCHANT, action, input_dict, action_filter,
                                           auto_commit=True, caller_area=_process_call_area)
    api_result = action_result
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result


#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_retail_store(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Manage RETAIL_STORE records.

    REGISTER/ADD/REFRESH require an existing, Active owning merchant; the
    store's status defaults to 'Active' when not supplied.
    """
    _api_name = "dbapi_retail_store"
    _api_entity = 'RETAIL_STORE'
    _api_action = action
    _api_msgID, _process_call_area = _process_setup(dbsession, _api_name, _api_action, _api_entity, caller_area)
    log_process_start(_api_msgID, **_process_call_area)
    log_process_input('', 'input_dict', input_dict, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)

    def _error(msg):
        log_process_message('', 'error', msg, **_process_call_area)
        result = {'api_status': 'error', 'api_message': msg,
                  'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, result, **_process_call_area)
        return result

    if action.upper() in ('REGISTER', 'ADD', 'REFRESH'):
        merch_rec = {'merchant_id': input_dict.get('merchant_id')}
        if input_dict.get('merchant_name'):
            merch_rec.update({'merchant_name': input_dict.get('merchant_name')})
        merchant = dbsession.get(dbmodel.MERCHANT, merch_rec, caller_area=_process_call_area)
        if not merchant:
            return _error('merchant not found')
        if not merchant.status == 'Active':
            return _error(f"merchant not Active.(status:{merchant.status})")
        input_dict.update({'merchant_id': merchant.merchant_id})
        if not input_dict.get('status'):
            input_dict.update({'status': 'Active'})

    action_result = dbsession.table_action(dbmodel.RETAIL_STORE, action, input_dict, action_filter,
                                           auto_commit=True, caller_area=_process_call_area)
    api_result = action_result
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result


#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_pointofsale(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Manage POINT_OF_SALE records.

    Bank-account actions are dispatched to dedicated helpers.
    REGISTER/ADD/REFRESH validate the owning retail store and its merchant
    (both must exist and be Active) and default status to 'Active'.
    """
    _api_name = "dbapi_pointofsale"
    _api_entity = 'POINT_OF_SALE'
    _api_action = action
    _api_msgID, _process_call_area = _process_setup(dbsession, _api_name, _api_action, _api_entity, caller_area)
    log_process_start(_api_msgID, **_process_call_area)

    act = action.upper()
    # FIX: the two bank-account handlers were swapped — ADD/REGISTER actions
    # previously dispatched to ..._bankaccount_remove and REMOVE/DELETE to
    # ..._bankaccount_add.
    if 'BANKACCOUNT' in act and ('ADD' in act or 'REGISTER' in act):
        return dbapi_pointofsale_bankaccount_add(dbsession, input_dict, action_filter,
                                                 caller_area=_process_call_area)
    elif 'BANKACCOUNT' in act and ('REMOVE' in act or 'DELETE' in act):
        return dbapi_pointofsale_bankaccount_remove(dbsession, input_dict, action_filter,
                                                    caller_area=_process_call_area)

    log_process_input('', 'input_dict', input_dict, **_process_call_area)
    log_process_input('', 'action_filter', action_filter, **_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)

    def _error(msg):
        log_process_message('', 'error', msg, **_process_call_area)
        result = {'api_status': 'error', 'api_message': msg,
                  'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, result, **_process_call_area)
        return result

    if act in ('REGISTER', 'ADD', 'REFRESH'):
        retail_store = dbsession.get(dbmodel.RETAIL_STORE,
                                     {'retail_store_id': input_dict.get('retail_store_id')},
                                     caller_area=_process_call_area)
        if not retail_store:
            return _error('retail_store not found')
        if not retail_store.status == 'Active':
            return _error(f"retail_store not Active.(status:{retail_store.status})")
        input_dict.update({'retail_store_id': retail_store.retail_store_id})
        merchant = dbsession.get(dbmodel.MERCHANT, {'merchant_id': retail_store.merchant_id},
                                 caller_area=_process_call_area)
        if not merchant:
            return _error('merchant not found')
        if not merchant.status == 'Active':
            return _error(f"merchant not Active.(status:{merchant.status})")
        input_dict.update({'merchant_id': merchant.merchant_id})
        if not input_dict.get('status'):
            input_dict.update({'status': 'Active'})

    action_result = dbsession.table_action(dbmodel.POINT_OF_SALE, action, input_dict, action_filter,
                                           auto_commit=True, caller_area=_process_call_area)
    api_result = action_result
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_service_point(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Table-action API for SERVICE_POINT records.

    For REGISTER/ADD/REFRESH the owning RETAIL_STORE and its MERCHANT
    are looked up and must both exist and be 'Active'; their ids are
    copied into input_dict before the row action is delegated to
    dbsession.table_action.  Other actions pass straight through.
    NOTE(review): statement nesting was reconstructed from a
    whitespace-mangled source -- confirm against version control.
    """
    _api_name = "dbapi_service_point"
    _api_entity = 'SERVICE_POINT'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard per-call process context: identity, adapters, debug and log settings.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID,**_process_call_area)
    log_process_input('', 'input_dict', input_dict,**_process_call_area)
    log_process_input('', 'action_filter', action_filter,**_process_call_area)
    log_process_input('', 'caller_area', caller_area,**_process_call_area)
    if action.upper() in ('REGISTER', 'ADD', 'REFRESH'):
        retail_store_id = input_dict.get('retail_store_id')
        store_rec={'retail_store_id':retail_store_id}
        retail_store = dbsession.get(dbmodel.RETAIL_STORE, store_rec, caller_area=_process_call_area)
        if not retail_store:
            msg = f'retail_store not found'
            log_process_message('', 'error', msg, **_process_call_area)
            api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        if not retail_store.status == 'Active':
            msg = f"retail_store not Active.(status:{retail_store.status})"
            log_process_message('', 'error', msg, **_process_call_area)
            api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        input_dict.update({'retail_store_id': retail_store.retail_store_id})
        # The merchant is resolved through the store, not taken from the caller.
        merchant_id = retail_store.merchant_id
        merch_rec = {'merchant_id': merchant_id}
        merchant = dbsession.get(dbmodel.MERCHANT, merch_rec, caller_area=_process_call_area)
        if not merchant:
            msg = f'merchant not found'
            log_process_message('', 'error', msg, **_process_call_area)
            api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        if not merchant.status == 'Active':
            msg = f"merchant not Active.(status:{merchant.status})"
            log_process_message('', 'error', msg, **_process_call_area)
            api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        input_dict.update({'merchant_id': merchant.merchant_id})
        if not input_dict.get('status'):
            input_dict.update({'status': 'Active'})
    action_result = dbsession.table_action(dbmodel.SERVICE_POINT, action , input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result = action_result
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result


#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_customer_service_assistant(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Table-action API for CUSTOMER_SERVICE_ASSISTANT records.

    REGISTER/ADD/REFRESH validates the owning MERCHANT; when an 'email'
    is supplied the matching CLIENT row is upserted first (the action is
    forced to 'REFRESH' for that upsert) and the resulting client_id is
    copied into input_dict before the assistant row action.
    NOTE(review): nesting reconstructed from a whitespace-mangled source.
    """
    _api_name = "dbapi_customer_service_assistant"
    _api_entity = 'CUSTOMER_SERVICE_ASSISTANT'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard per-call process context.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID,**_process_call_area)
    log_process_input('', 'input_dict', input_dict,**_process_call_area)
    log_process_input('', 'action_filter', action_filter,**_process_call_area)
    log_process_input('', 'caller_area', caller_area,**_process_call_area)
    if action.upper() in ('REGISTER', 'ADD', 'REFRESH'):
        merchant_id=input_dict.get('merchant_id')
        merch_rec = {'merchant_id': merchant_id}
        merchant = dbsession.get(dbmodel.MERCHANT, merch_rec, caller_area=_process_call_area)
        if not merchant:
            msg = f'merchant not found'
            log_process_message('', 'error', msg, **_process_call_area)
            api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        if not merchant.status == 'Active':
            msg = f"merchant not Active.(status:{merchant.status})"
            log_process_message('', 'error', msg, **_process_call_area)
            api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return api_result
        if not input_dict.get('status'):
            input_dict.update({'status': 'Active'})
        #client
        if input_dict.get('email'):
            # Upsert the CLIENT row for this assistant; 'REFRESH' makes the
            # upsert (and the assistant action below) idempotent.
            action='REFRESH'
            input_dict.update({'client_type': 'customer_service_assistant'})
            action_result = dbsession.table_action(dbmodel.CLIENT, action, input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
            api_result = action_result
            api_result.update({'api_action': _api_action, 'api_name': _api_name})
            # NOTE(review): thismsg is captured but never used.
            thismsg=action_result.get('api_message')
            if not api_result.get('api_status') == 'success':
                log_process_finish(_api_msgID, api_result, **_process_call_area)
                return api_result
            client = api_result.get('api_data')
            client_id = client.get('client_id')
            input_dict.update({'client_id': client_id})
    action_result = dbsession.table_action(dbmodel.CUSTOMER_SERVICE_ASSISTANT, action , input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result = action_result
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result


#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_merchant_get_bankaccounts(dbsession, merchant_record, action_filter={}, caller_area={}):
    """Return the ids of all Active BANK_ACCOUNT rows linked to a merchant.

    The merchant must exist and be 'Active'.  Accounts are looked up via
    the merchant's client_id; the api_data of the success result is a
    list of stringified bank_account_id values (possibly empty).
    """
    _api_name = "dbapi_merchant_get_bankaccounts"
    _api_entity = 'MERCHANT'
    _api_action = 'get_bank_accounts'
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard per-call process context.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID,**_process_call_area)
    log_process_input('', 'input_dict', merchant_record,**_process_call_area)
    log_process_input('', 'action_filter', action_filter,**_process_call_area)
    log_process_input('', 'caller_area', caller_area,**_process_call_area)
    merchant = dbsession.get(dbmodel.MERCHANT, merchant_record, caller_area=_process_call_area)
    if not merchant:
        msg = f'merchant not found'
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    if not merchant.status == 'Active':
        msg = f"merchant not Active.(status:{merchant.status})"
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    # Accounts are attached to the merchant's CLIENT row, not to the merchant itself.
    client_id = merchant.client_id
    merchant_accounts=[]
    filterJson = {"client_id": client_id, "status": 'Active'}
    bank_accounts=dbsession.get_rows(dbmodel.BANK_ACCOUNT, filterJson, caller_area=_process_call_area)
    if bank_accounts:
        msg = f'[{len(bank_accounts)} bank accounts found] for merchant [{merchant.name}] client_id [{client_id}]'
        log_process_message('', 'success', msg, **_process_call_area)
        for bank_account in bank_accounts:
            bank_account_id = str(bank_account.bank_account_id)
            # NOTE(review): bank_accountID is assigned but never used.
            bank_accountID = str(bank_account.bank_accountID)
            merchant_accounts.append(bank_account_id)
    msg = f'OK. [{len(merchant_accounts)} bank accounts]'
    api_result = {'api_status': 'success', 'api_message': msg, 'data_records': len(merchant_accounts), 'api_data': merchant_accounts, 'api_action': _api_action.upper(), 'api_name': _api_name}
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result


#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_merchant_bankaccount_register(dbsession, bankaccount_record, action_filter={}, caller_area={}):
    """Attach an existing, Active bank account to a merchant.

    bankaccount_record must identify the merchant and supply one of
    'bank_account_id', 'bank_accountID' or 'bank_account' to resolve the
    account.  The account's fields are copied onto the MERCHANT row via
    an UPDATE table_action.
    """
    _api_name = "dbapi_merchant_bankaccount_register"
    _api_entity = 'MERCHANT'
    _api_action = 'register_bank_account'
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard per-call process context.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID,**_process_call_area)
    log_process_input('', 'input_dict', bankaccount_record,**_process_call_area)
    log_process_input('', 'action_filter', action_filter,**_process_call_area)
    log_process_input('', 'caller_area', caller_area,**_process_call_area)
    merchant = dbsession.get(dbmodel.MERCHANT, bankaccount_record, caller_area=_process_call_area)
    if not merchant:
        msg = f'merchant not found'
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    if not merchant.status == 'Active':
        msg = f"merchant not Active.(status:{merchant.status})"
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    # Resolve the account id from whichever key the caller supplied, in order
    # of preference: internal id, external accountID, raw account reference.
    # NOTE(review): caller_area={} drops the process call context for these
    # lookups -- presumably _process_call_area was intended; confirm.
    bank_account_id=None
    account_id = bankaccount_record.get('bank_account_id')
    if account_id:
        bank_account_id = dbapi_get_bank_account_id(dbsession, account_id, caller_area={})
    if not bank_account_id:
        account_id = bankaccount_record.get('bank_accountID')
        if account_id:
            bank_account_id = dbapi_get_bank_account_id(dbsession, account_id, caller_area={})
    if not bank_account_id:
        account_id = bankaccount_record.get('bank_account')
        if account_id:
            bank_account_id = dbapi_get_bank_account_id(dbsession, account_id, caller_area={})
    if not bank_account_id:
        msg = f"bank_account not found"
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    bankaccount_record.update({'bank_account_id':bank_account_id})
    bankaccount_record.update({'merchant_id':merchant.merchant_id})
    bank_account = dbsession.get(dbmodel.BANK_ACCOUNT, {'bank_account_id':bank_account_id}, caller_area=_process_call_area)
    if not bank_account:
        msg = f"bank account not found"
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    if not bank_account.status == 'Active':
        msg = f"bank account {bank_account_id} not Active (status:{bank_account.status})"
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    # Copy the bank account's fields onto the merchant row and persist.
    merchant_record = bank_account.to_dict()
    merchant_record.update({'merchant_id': merchant.merchant_id})
    api_result = dbsession.table_action(dbmodel.MERCHANT, 'UPDATE', merchant_record, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result


#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_pointofsale_bankaccount_add(dbsession, bankaccount_record, action_filter={}, caller_area={}):
    """Attach an existing, Active bank account to a point of sale.

    Mirrors dbapi_merchant_bankaccount_register: resolves the account id
    from 'bank_account_id' / 'bank_accountID' / 'bank_account', then
    copies the account's fields onto the POINT_OF_SALE row via UPDATE.
    """
    _api_name = "dbapi_pointofsale_bankaccount_add"
    _api_entity = 'POINT_OF_SALE'
    _api_action = 'add_bank_account'
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard per-call process context.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID,**_process_call_area)
    log_process_input('', 'input_dict', bankaccount_record,**_process_call_area)
    log_process_input('', 'action_filter', action_filter,**_process_call_area)
    log_process_input('', 'caller_area', caller_area,**_process_call_area)
    pointofsale = dbsession.get(dbmodel.POINT_OF_SALE, bankaccount_record, caller_area=_process_call_area)
    if not pointofsale:
        msg = f'pointofsale not found'
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    if not pointofsale.status == 'Active':
        msg = f"pointofsale not Active.(status:{pointofsale.status})"
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    # Resolve the account id from whichever key the caller supplied.
    # NOTE(review): caller_area={} drops the process call context here, as in
    # dbapi_merchant_bankaccount_register -- confirm intent.
    bank_account_id=None
    account_id = bankaccount_record.get('bank_account_id')
    if account_id:
        bank_account_id = dbapi_get_bank_account_id(dbsession, account_id, caller_area={})
    if not bank_account_id:
        account_id = bankaccount_record.get('bank_accountID')
        if account_id:
            bank_account_id = dbapi_get_bank_account_id(dbsession, account_id, caller_area={})
    if not bank_account_id:
        account_id = bankaccount_record.get('bank_account')
        if account_id:
            bank_account_id = dbapi_get_bank_account_id(dbsession, account_id, caller_area={})
    if not bank_account_id:
        msg = f"bank_account not found"
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    bankaccount_record.update({'bank_account_id':bank_account_id})
    bankaccount_record.update({'pointofsale_id':pointofsale.pointofsale_id})
    bank_account = dbsession.get(dbmodel.BANK_ACCOUNT, {'bank_account_id':bank_account_id}, caller_area=_process_call_area)
    if not bank_account:
        msg = f"bank account not found"
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    if not bank_account.status == 'Active':
        msg = f"bank account {bank_account_id} not Active (status:{bank_account.status})"
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    # Copy the bank account's fields onto the point-of-sale row and persist.
    pointofsale_record = bank_account.to_dict()
    pointofsale_record.update({'pointofsale_id': pointofsale.pointofsale_id})
    api_result = dbsession.table_action(dbmodel.POINT_OF_SALE, 'UPDATE', pointofsale_record, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result


#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_pointofsale_bankaccount_remove(dbsession, pointofsale_record, action_filter={}, caller_area={}):
    """Detach the bank account from a point of sale.

    Clears all bank-related columns (to empty strings) on the
    POINT_OF_SALE row via an UPDATE table_action.
    """
    _api_name = "dbapi_pointofsale_bankaccount_remove"
    _api_entity = 'POINT_OF_SALE'
    _api_action = 'remove_bank_account'
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard per-call process context.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID,**_process_call_area)
    log_process_input('', 'input_dict', pointofsale_record,**_process_call_area)
    log_process_input('', 'action_filter', action_filter,**_process_call_area)
    log_process_input('', 'caller_area', caller_area,**_process_call_area)
    pointofsale = dbsession.get(dbmodel.POINT_OF_SALE, pointofsale_record, caller_area=_process_call_area)
    if not pointofsale:
        msg = f'pointofsale not found'
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    if not pointofsale.status == 'Active':
        msg = f"pointofsale not Active.(status:{pointofsale.status})"
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    # Rebuild the record from the row and blank out every bank-related field.
    pointofsale_record = pointofsale.to_dict()
    pointofsale_record.update({
        'bank_account_id' : '',
        'bank_subscription_id' : '',
        'bank_code' : '',
        'bank_subscriptionID' : '',
        'bank_accountID' : '',
        'payments_currency' : '',
    })
    api_result = dbsession.table_action(dbmodel.POINT_OF_SALE, 'UPDATE', pointofsale_record, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result


#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_pointofsale_credit_info(dbsession, pointofsale_record, action_filter={}, caller_area={}):
    """Return the credit/bank details applicable to a point of sale.

    If the point of sale has no bank_accountID of its own, the owning
    merchant's bank fields are used instead.  The success result carries
    the assembled credit_info dict in api_data.
    """
    _api_name = "dbapi_pointofsale_credit_info"
    _api_entity = 'POINT_OF_SALE'
    _api_action = 'get_pointofsale_credit_info'
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard per-call process context.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID,**_process_call_area)
    log_process_input('', 'input_dict', pointofsale_record,**_process_call_area)
    log_process_input('', 'action_filter', action_filter,**_process_call_area)
    log_process_input('', 'caller_area', caller_area,**_process_call_area)
    pointofsale = dbsession.get(dbmodel.POINT_OF_SALE, pointofsale_record, caller_area=_process_call_area)
    if not pointofsale:
        msg = f'pointofsale not found'
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    if not pointofsale.status == 'Active':
        msg = f"pointofsale not Active.(status:{pointofsale.status})"
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    pointofsale_id=pointofsale.pointofsale_id
    merchant_id=pointofsale.merchant_id
    pointofsale_name = pointofsale.name
    bank_account_id = pointofsale.bank_account_id
    bank_subscription_id = pointofsale.bank_subscription_id
    bank_code = pointofsale.bank_code
    bank_subscriptionID = pointofsale.bank_subscriptionID
    bank_accountID = pointofsale.bank_accountID
    payments_currency = pointofsale.payments_currency
    # NOTE(review): pointofsale_record is rebuilt here but never used afterwards.
    pointofsale_record = pointofsale.to_dict()
    if not bank_accountID:
        # Fall back to the owning merchant's bank details.
        merchant = dbsession.get(dbmodel.MERCHANT, {'merchant_id':merchant_id}, caller_area=_process_call_area)
        bank_account_id = merchant.bank_account_id
        bank_subscription_id = merchant.bank_subscription_id
        bank_code = merchant.bank_code
        bank_subscriptionID = merchant.bank_subscriptionID
        bank_accountID = merchant.bank_accountID
        payments_currency = merchant.payments_currency
        x = ' from merchant'
    else:
        x = ' from point_of_sale'
    credit_info = {
        'pointofsale_id': pointofsale_id,
        'pointofsale_name':pointofsale_name,
        'bank_account_id':bank_account_id,
        'bank_subscription_id':bank_subscription_id,
        'bank_code':bank_code,
        'bank_subscriptionID':bank_subscriptionID,
        'bank_accountID':bank_accountID,
        'payments_currency': payments_currency,
    }
    msg = f"OK. pointofsale credit info retrieved [{x}]"
    log_process_message('', 'success', msg, **_process_call_area)
    api_result = {'api_status': 'success', 'api_message': msg, 'api_data':credit_info, 'api_action': _api_action.upper(), 'api_name': _api_name}
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result


#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_interaction(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Table-action API for INTERACTION records.

    START/REQUEST, ACCEPT, FINISH and MESSAGE are routed to the
    dedicated helpers (with the caller's original caller_area); every
    other action goes straight to dbsession.table_action.
    """
    _api_name = "dbapi_interaction"
    _api_entity = 'INTERACTION'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard per-call process context.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID,**_process_call_area)
    if action.upper() in ('START','REQUEST'):
        return dbapi_interaction_start(dbsession, input_dict, caller_area=caller_area)
    elif action.upper() == 'ACCEPT':
        # NOTE(review): 'ACCEPT' dispatches to dbapi_interaction_finish, same
        # as 'FINISH' -- confirm a dedicated accept handler was not intended.
        return dbapi_interaction_finish(dbsession, input_dict, caller_area=caller_area)
    elif action.upper() == 'FINISH':
        return dbapi_interaction_finish(dbsession, input_dict, caller_area=caller_area)
    elif action.upper() == 'MESSAGE':
        return dbapi_interaction_message_add(dbsession, input_dict, caller_area=caller_area)
    log_process_input('', 'input_dict', input_dict,**_process_call_area)
    log_process_input('', 'action_filter', action_filter,**_process_call_area)
    log_process_input('', 'caller_area', caller_area, **_process_call_area)
    action_result = dbsession.table_action(dbmodel.INTERACTION, action , input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result = action_result
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result


#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_interaction_message(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Straight pass-through table-action API for INTERACTION_MESSAGE records."""
    _api_name = "dbapi_interaction_message"
    _api_entity = 'INTERACTION_MESSAGE'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard per-call process context.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID,**_process_call_area)
    log_process_input('', 'input_dict', input_dict,**_process_call_area)
    log_process_input('', 'action_filter', action_filter,**_process_call_area)
    log_process_input('', 'caller_area', caller_area,**_process_call_area)
    action_result = dbsession.table_action(dbmodel.INTERACTION_MESSAGE, action , input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result = action_result
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result


#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_bank(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Straight pass-through table-action API for BANK records."""
    _api_name = "dbapi_bank"
    _api_entity = 'BANK'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard per-call process context.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID,**_process_call_area)
    log_process_input('', 'input_dict', input_dict,**_process_call_area)
    log_process_input('', 'action_filter', action_filter,**_process_call_area)
    log_process_input('', 'caller_area', caller_area,**_process_call_area)
    action_result = dbsession.table_action(dbmodel.BANK, action , input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result = action_result
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result


#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_bank_authorization(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Straight pass-through table-action API for BANK_AUTHORIZATION records."""
    _api_name = "dbapi_bank_authorization"
    _api_entity = 'BANK_AUTHORIZATION'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard per-call process context.
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID,**_process_call_area)
    log_process_input('', 'input_dict', input_dict,**_process_call_area)
    log_process_input('', 'action_filter', action_filter,**_process_call_area)
    log_process_input('', 'caller_area', caller_area,**_process_call_area)
    action_result = dbsession.table_action(dbmodel.BANK_AUTHORIZATION, action , input_dict, action_filter, auto_commit=True, caller_area=_process_call_area)
    api_result = action_result
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_bank_subscription(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Generic table-action api wrapper for the BANK_SUBSCRIPTION entity.

    Builds the process/logging context, executes the requested action through
    dbsession.table_action (auto-committed) and returns the result dict
    augmented with 'api_action' and 'api_name'.
    """
    _api_name = "dbapi_bank_subscription"
    _api_entity = 'BANK_SUBSCRIPTION'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # assemble the process context consumed by the logging/debug helpers
    identity = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    adapters = {'dbsession': dbsession}
    log_opts = {'indent_method': 'AUTO', 'indent_level': None}
    debug_level = get_debug_level(caller_area.get('debug_level'), **identity, **adapters)
    debug_files = get_debug_files(debug_level, **identity, **adapters)
    debug_opts = {'debug_level': debug_level, 'debug_files': debug_files}
    signature = build_process_signature(**identity, **adapters, **debug_opts, **log_opts)
    call_area = build_process_call_area(signature, caller_area)
    log_process_start(_api_msgID, **call_area)
    for arg_name, arg_value in (('input_dict', input_dict), ('action_filter', action_filter), ('caller_area', caller_area)):
        log_process_input('', arg_name, arg_value, **call_area)
    # the table action result dict is reused (and mutated) as the api result
    api_result = dbsession.table_action(dbmodel.BANK_SUBSCRIPTION, action, input_dict, action_filter, auto_commit=True, caller_area=call_area)
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **call_area)
    return api_result
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_bank_account(dbsession, action, input_dict, action_filter={}, caller_area={}):
    """Generic table-action api wrapper for the BANK_ACCOUNT entity.

    Same contract as dbapi_bank_subscription, targeting dbmodel.BANK_ACCOUNT.
    """
    _api_name = "dbapi_bank_account"
    _api_entity = 'BANK_ACCOUNT'
    _api_action = action
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # assemble the process context consumed by the logging/debug helpers
    identity = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    adapters = {'dbsession': dbsession}
    log_opts = {'indent_method': 'AUTO', 'indent_level': None}
    debug_level = get_debug_level(caller_area.get('debug_level'), **identity, **adapters)
    debug_files = get_debug_files(debug_level, **identity, **adapters)
    debug_opts = {'debug_level': debug_level, 'debug_files': debug_files}
    signature = build_process_signature(**identity, **adapters, **debug_opts, **log_opts)
    call_area = build_process_call_area(signature, caller_area)
    log_process_start(_api_msgID, **call_area)
    for arg_name, arg_value in (('input_dict', input_dict), ('action_filter', action_filter), ('caller_area', caller_area)):
        log_process_input('', arg_name, arg_value, **call_area)
    # the table action result dict is reused (and mutated) as the api result
    api_result = dbsession.table_action(dbmodel.BANK_ACCOUNT, action, input_dict, action_filter, auto_commit=True, caller_area=call_area)
    api_result.update({'api_action': _api_action, 'api_name': _api_name})
    log_process_finish(_api_msgID, api_result, **call_area)
    return api_result
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_get_bank_account_id(dbsession, any_accountid, caller_area={}): if not any_accountid: return None _api_name = "dbapi_get_bank_account_id" _api_entity = 'BANK_ACCOUNT' _api_action = 'get' _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) account = dbsession.get(dbmodel.BANK_ACCOUNT, {'bank_account_id': any_accountid}, caller_area=_process_call_area) if account: bank_account_id = account.bank_account_id else: account = dbsession.get(dbmodel.BANK_ACCOUNT, {'bank_accountID': any_accountid}, caller_area=_process_call_area) if account: bank_account_id = account.bank_account_id else: bank_account_id = None return bank_account_id #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_get_bank_code(dbsession, any_bank_id, return_field='bank_id', caller_area={}): if not any_bank_id: return None _api_name = "dbapi_get_bank_code" _api_entity = 'BANK' _api_action = 'get' _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = 
{'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) bank = dbsession.get(dbmodel.BANK, {'bank_id': any_bank_id}, caller_area=_process_call_area) if not bank: bank = dbsession.get(dbmodel.BANK, {'bank_code': any_bank_id}, caller_area=_process_call_area) if not bank: bank = dbsession.get(dbmodel.BANK, {'bank_BIC': any_bank_id}, caller_area=_process_call_area) if not bank: return None if return_field.upper().find('CODE') >=0: return bank.bank_code elif return_field.upper().find('BIC') >=0: return bank.bank_BIC else: return bank.bank_id #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_device_log(dbsession, device_uid, application_name, geolocation_lat, geolocation_lon, client_id, caller_area={}): _api_name="dbapi_device_log" _api_entity = 'DEVICE' _api_action = 'log' _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = 
get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) log_process_input('', 'device_uid', device_uid,**_process_call_area) log_process_input('', 'application_name', application_name,**_process_call_area) log_process_input('', 'geolocation_lat', geolocation_lat,**_process_call_area) log_process_input('', 'geolocation_lon', geolocation_lon,**_process_call_area) log_process_input('', 'client_id', client_id,**_process_call_area) # print(geolocation_lat,geolocation_lon) # glat=geoloc_to_integer(geolocation_lat) # glon = geoloc_to_integer(geolocation_lon) # print(glat,glon) # glat2=integer_to_geoloc(glat) # glon2 = integer_to_geoloc(glon) # print(glat2,glon2) # geolocation_lat=geoloc_to_integer(geolocation_lat) # geolocation_lon=geoloc_to_integer(geolocation_lon) # print(geolocation_lat,geolocation_lon) now = datetime.datetime.utcnow() application_id = None if not application_name: application_name='?' 
application = dbsession.get(dbmodel.APPLICATION, {'application_name': application_name}, caller_area=_process_call_area) if application: application_id = application.application_id device_record = {'device_uid': device_uid, 'last_usage_geolocation_lat': geolocation_lat, 'last_usage_geolocation_lon': geolocation_lon, 'last_usage_timestamp': now} usage_record = {'device_uid': device_uid, 'application_name': application_name, 'geolocation_lat': geolocation_lat, 'geolocation_lon': geolocation_lon, 'client_id': client_id} client_device_record = {'device_uid': device_uid, 'client_id': client_id, 'application_name': application_name, 'application_id': application_id, 'last_usage_timestamp': now} device = dbsession.refresh(dbmodel.DEVICE, device_record, auto_commit=False, caller_area=_process_call_area) device_usage = dbsession.refresh(dbmodel.DEVICE_USAGE,usage_record, auto_commit=False, caller_area=_process_call_area) client_device = dbsession.refresh(dbmodel.CLIENT_DEVICE,client_device_record, auto_commit=False, caller_area=_process_call_area) dbsession.commit(**_process_call_area) if client_device: logged_record = client_device.to_dict() if device.times_used <= 1: msg=f"OK. new device logged" else: msg = f"OK. 
device logged, times_used:{device_usage.times_used}/{client_device.times_used}" log_process_message('', 'success', msg, **_process_call_area) api_result = {'api_status': 'success', 'api_message': msg, 'api_data': logged_record, 'api_action': _api_action.upper(), 'api_name': _api_name} else: msg = f"device logged FAILED" api_result = {'api_status': 'error', 'api_message': msg, 'api_data': {}, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_token_is_valid(dbsession, token, caller_area={}): _api_name = "dbapi_token_is_valid" _api_entity = 'TOKEN' _api_action = 'validation' _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) log_process_input('', 'token', token,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) if type(token) == type(''): input_dict = {'token': token} elif type(token) == type({}): input_dict = token else: msg='invalid 
token provided' log_process_message('', 'error', msg, **_process_call_area) return False if not input_dict.get('token'): msg='no token provided' log_process_message('', 'error', msg, **_process_call_area) return False token_record = dbsession.get(dbmodel.TOKEN, input_dict, caller_area=_process_call_area) if not token_record: msg = f'access token is NOT valid.(not found)' log_process_message('', 'error', msg, **_process_call_area) return False expiryDT = token_record.expiryDT if not expiryDT: msg = f'access token is NOT valid.(no expiryDT)' log_process_message('', 'error', msg, **_process_call_area) return False #universal time #GMT=Greenwich Mean Time #UTC=Coordinated Universal Time #There is no time difference between Coordinated Universal Time and Greenwich Mean Time #nowString = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') #now=datetime.datetime.utcnow() if expiryDT < datetime.datetime.utcnow(): msg = f'access token is NOT valid.(expired)' api_result = False api_result = True log_process_result(_api_msgID, api_result, data_name='access_token_is_valid', **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_token_get_access_token(dbsession, token_request, caller_area={}): _api_name = "dbapi_token_get_access_token" _api_entity = 'TOKEN' _api_action = 'GET_TOKEN' _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) 
_process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) log_process_input('', 'token_request', token_request,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) application_name=token_request.get('application_name') client_id=token_request.get('application_client_id') client_secretKey=token_request.get('application_client_secretKey') application = dbsession.get(dbmodel.APPLICATION, {'application_name': application_name, 'client_id': client_id}, caller_area=_process_call_area) if not application: msg='application not registered' api_result={'api_status': 'error', 'api_message': msg,'api_data':{}} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result if not application.client_id == client_id or not application.client_secretKey == client_secretKey: msg='application credentials not valid' api_result={'api_status': 'error', 'api_message': msg,'api_data':{}} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result token_duration_secs = 3600 #1 hour if token_request.get('token_scope') == 'application_service': token_duration_secs = 3600 #1 hour token_request.update({'duration_seconds':token_duration_secs}) token_request.update({'status':'Active'}) expiryDT = datetime.datetime.utcnow() + datetime.timedelta(seconds=token_duration_secs) token_request.update({'expiryDT': expiryDT}) if 'token' in token_request.keys(): token_request.pop('token') token = dbsession.insert(dbmodel.TOKEN, token_request,auto_commit=True, caller_area=_process_call_area) if not token: msg='token generation failed' api_result={'api_status': 'system error', 'api_message': msg,'api_data':{}} 
log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result token_record = { 'token_type': token.token_type, 'token_scope': token.token_scope, 'grant_type': token.grant_type, 'token': token.token, 'duration_seconds': token.duration_seconds, 'expiryDT': token.expiryDT, } msg='OK. token generated' api_result={'api_status': 'success', 'api_message': msg,'api_data':token_record} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_cleanup_tokens(dbsession, caller_area={}): _api_name = "debapi_cleanup_tokens" _api_entity = 'TOKEN' _api_action = 'CLEANUP' _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) where_expression = {'status': 'Expired'} deleted_result = dbsession.delete_rows(dbmodel.TOKEN, where_expression, auto_commit=True) deleted_rows = deleted_result.get('rows_deleted', 0) #nowString = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') #where_expression = f"expiryDT<'{datetime.datetime.utcnow()}'" 
where_expression = {'expiryDT': {datetime.datetime.utcnow()}} update_dict = {'status': 'Expired'} expired_result = dbsession.update_rows(dbmodel.TOKEN, update_dict,where_expression, auto_commit=True, caller_area=_process_call_area) expired_rows = expired_result.get('rows_updated', 0) msg = f'tokens cleaned with {expired_rows} tokens expired, {deleted_rows} removed.' api_result = {'api_status': 'success', 'api_message': msg, 'rows_expired': expired_rows, 'rows_removed': deleted_rows} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_interaction_start(dbsession, input_dict, caller_area={}): _api_name = "dbapi_interaction_start" _api_entity = 'INTERACTION' _api_action = 'START' _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) 
log_process_start(_api_msgID,**_process_call_area) log_process_input('', 'input_dict', input_dict,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) #//////////////////////////////////////// originator = None originator_id = None originator_name = None corresponder = None corresponder_id = None corresponder_name = None #//////////////////////////////////////// #step-1: originator (originator, originator_id, originator_name) = find_originator(dbsession, input_dict, _process_call_area) # originator_id = input_dict.get('originator_id') # if originator_id: # xid = dbsession.get(dbmodel.CLIENT, {'client_id':originator_id}, caller_area=_process_call_area) # if xid: # originator='client' # originator_name = xid.email # else: # xid = dbsession.get(dbmodel.POINT_OF_SALE, {'pointofsale_id':originator_id}, caller_area=_process_call_area) # if xid: # originator='pointofsale' # originator_name = xid.name # else: # msg = f'originator not valid' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # xoriginator = input_dict.get('originator') # if xoriginator and not xoriginator == originator: # msg = f'originator_id not valid for originator {xoriginator}' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # else: # client_id = input_dict.get('client_id') # if client_id: # client=dbsession.get(dbmodel.CLIENT, {'client_id':client_id}, caller_area=_process_call_area) # if not client: # msg = f'client not found' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # originator='client' # 
originator_id=client_id # originator_name = client.email # else: # pointofsale_id = input_dict.get('pointofsale_id') # if pointofsale_id: # pointofsale=dbsession.get(dbmodel.POINT_OF_SALE, {'pointofsale_id':pointofsale_id}, caller_area=_process_call_area) # if not pointofsale: # msg = f'pointofsale not found' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # originator='pointofsale' # originator_id=pointofsale_id # originator_name = pointofsale.name # else: # msg = f'no pointofsale or consumer or client defined' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result if not originator_id: msg = f'originator not defined (pointofsale or client or service_point)' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result xoriginator = input_dict.get('originator') if xoriginator and not xoriginator == originator: msg = f'originator_id not valid for originator {xoriginator}' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result input_dict.update({'originator': originator}) input_dict.update({'originator_id': originator_id}) input_dict.update({'originator_name': originator_name}) msg = f'originator set to [{originator_name}]' log_process_message('', 'success', msg, **_process_call_area) #step-2: corresponder (corresponder, corresponder_id, corresponder_name) = find_corresponder(dbsession, input_dict, _process_call_area) # corresponder_id = input_dict.get('corresponder_id') # if not 
    # (removed: commented-out legacy corresponder-resolution code, superseded by find_corresponder())
    if not corresponder_id:
        msg = f'corresponder not valid'
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    # If the caller also supplied a corresponder kind, it must agree with the resolved one.
    xcorresponder = input_dict.get('corresponder')
    if xcorresponder and not xcorresponder == corresponder:
        msg = f'corresponder_id not valid for corresponder {xcorresponder}'
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    input_dict.update({'corresponder': corresponder})
    input_dict.update({'corresponder_id': corresponder_id})
    input_dict.update({'corresponder_name': corresponder_name})
    msg = f'corresponder set to [{corresponder_name}]'
    log_process_message('', 'success', msg, **_process_call_area)
    #step-3: already active — cancel any interactions this originator still has open
    filterJson = {"originator": originator, "originator_id": originator_id, "status": 'Active'}
    active_interactions=dbsession.get_rows(dbmodel.INTERACTION, filterJson, caller_area=_process_call_area)
    if active_interactions:
        msg = f'[{len(active_interactions)} active interactions found] for originator [{originator_name}]'
        log_process_message('', 'warning', msg, **_process_call_area)
        for active_interaction in active_interactions:
            interaction_id = active_interaction.interaction_id
            time_start = active_interaction.row_timestamp
            time_end = datetime.datetime.utcnow()
            diff = time_end - time_start
            # duration in whole seconds since the row was created
            duration = diff.days * 24 * 60 * 60 + diff.seconds
            interaction_rec = active_interaction.to_dict()
            interaction_rec.update({'status':'canceled','completed_timestamp':time_end,'duration':duration})
            active_interaction = dbsession.update(dbmodel.INTERACTION, interaction_rec, auto_commit=True, caller_area=_process_call_area)
    #step-4: corresponder is available (not active) — time out stale ones (> 5 minutes)
    filterJson = {"corresponder": corresponder, "corresponder_id": corresponder_id, "status": 'Active'}
    active_interactions=dbsession.get_rows(dbmodel.INTERACTION, filterJson, caller_area=_process_call_area)
    if active_interactions:
        msg = f'[{len(active_interactions)} active interaction(s) found] for corresponder [{corresponder_name}]'
        log_process_message('', 'warning', msg, **_process_call_area)
        for active_interaction in active_interactions:
            interaction_id = active_interaction.interaction_id
            time_start = active_interaction.row_timestamp
            time_end = datetime.datetime.utcnow()
            diff = time_end - time_start
            duration = diff.days * 24 * 60 * 60 + diff.seconds
            if duration>5*60: # 5 minutes
                interaction_rec = active_interaction.to_dict()
                interaction_rec.update({'status':'canceled-timeout','completed_timestamp':time_end,'duration':duration})
                active_interaction = dbsession.update(dbmodel.INTERACTION, interaction_rec, auto_commit=True, caller_area=_process_call_area)
                msg = f'corresponder {corresponder_name} interaction {interaction_id} timed-out and canceled after {duration/60} minutes'
                log_process_message('', 'warning', msg, **_process_call_area)
    # Re-check: anything still Active after the timeout sweep means the corresponder is busy.
    filterJson = {"corresponder": corresponder, "corresponder_id": corresponder_id, "status": 'Active'}
    active_interactions=dbsession.get_rows(dbmodel.INTERACTION, filterJson, caller_area=_process_call_area)
    if active_interactions:
        msg = f'corresponder {corresponder_name} is not available'
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    #step-5: insert the new interaction in 'Requested' state
    input_dict.update({'status': 'Requested'})
    interaction = dbsession.insert(dbmodel.INTERACTION, input_dict, auto_commit=True, caller_area=_process_call_area)
    if not interaction:
        msg = f'interaction start failed'
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    interaction_id = interaction.interaction_id
    #step-6: record the opening 'start' message for the new interaction
    interaction_message = {
        'interaction_id':interaction_id,
        'originator_id':interaction.originator_id,
        'originator': interaction.originator,
        'originator_name': interaction.originator_name,
        'message_type':'start',
        'message_record':f"hi. i am {interaction.originator} {interaction.originator_name} and i want to interact with {interaction.corresponder} {interaction.corresponder_name}",
        'content_type':'text',
        'format':'',
        'application_name': input_dict.get('application_name'),
        'geolocation_lat': input_dict.get('geolocation_lat'),
        'geolocation_lon': input_dict.get('geolocation_lon'),
    }
    start_message = dbsession.insert(dbmodel.INTERACTION_MESSAGE, interaction_message, auto_commit=True, caller_area=_process_call_area)
    if not start_message:
        msg = f'start message insert failed'
        log_process_message('', 'error', msg, **_process_call_area)
        msg = f'interaction start failed (message insert failed)'
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    #step-7: result
    interaction_record = interaction.to_dict()
    msg=f'OK. interaction established between You and {corresponder.upper()} {corresponder_name}'
    api_result = {'api_status': 'success', 'api_message': msg, 'interaction_id': interaction_id, 'api_data': interaction_record, 'api_action': _api_action.upper(), 'api_name': _api_name}
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_interaction_accept(dbsession, input_dict, caller_area={}):
    """Accept a 'Requested' interaction and move it to 'Active'.

    Resolves the accepting party via find_originator(), validates that it is
    not the requestor and (when pre-set) matches the intended corresponder,
    records an 'accept' INTERACTION_MESSAGE, then updates the INTERACTION row.
    Returns an api_result dict on the main paths; NOTE(review): several early
    validation paths ``return`` bare (None) — confirm callers tolerate that.
    """
    _api_name = "dbapi_interaction_accept"
    _api_entity = 'INTERACTION'
    _api_action = 'ACCEPT'
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard per-API process bookkeeping (identity, debug level, call area).
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID,**_process_call_area)
    log_process_input('', 'input_dict', input_dict,**_process_call_area)
    log_process_input('', 'caller_area', caller_area,**_process_call_area)
    interaction_id = input_dict.get('interaction_id')
    if not interaction_id:
        msg = f'interaction not defined'
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return
    interaction = dbsession.get(dbmodel.INTERACTION, {'interaction_id':interaction_id}, caller_area=_process_call_area)
    if not interaction:
        msg = f'interaction not found'
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return
    if interaction.status == 'Active':
        msg = f'interaction is already Active'
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return
    if not interaction.status=='Requested':
        msg = f'interaction is already [{interaction.status}]'
        api_result = {'api_status': 'error', 'api_message': msg}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return
    #////////////////////////////////////////
    originator = None
    originator_id = None
    originator_name = None
    #////////////////////////////////////////
    # (removed: commented-out legacy originator-resolution code, superseded by find_originator())
    (originator, originator_id, originator_name) = find_originator(dbsession, input_dict, _process_call_area)
    if not originator_id:
        msg = f'originator not defined (pointofsale or client or service_point)'
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    xoriginator = input_dict.get('originator')
    if xoriginator and not xoriginator == originator:
        msg = f'originator_id not valid for originator {xoriginator}'
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    # A party cannot accept its own request.
    if originator_id == interaction.originator_id:
        msg = f'accepter [{originator_name}] same as requestor'
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_finish(_api_msgID, api_result, **_process_call_area)
        return api_result
    # If the request targeted a specific corresponder, only that party may accept.
    if interaction.corresponder_id:
        if not originator_id == interaction.corresponder_id:
            msg = f'interaction must be accepted by [{interaction.corresponder}] {interaction.corresponder_name} [not] by [{originator}] {originator_name} '
            api_result = {'api_status': 'error', 'api_message': msg}
            log_process_finish(_api_msgID, api_result, **_process_call_area)
            return
    input_dict.update({'corresponder': originator})
    input_dict.update({'corresponder_id': originator_id})
    input_dict.update({'corresponder_name': originator_name})
    msg=f'corresponder: [{originator}] [[{originator_name}]]'
    log_process_message('', 'success', msg, **_process_call_area)
    # Record the 'accept' message; failure is logged but does not abort the accept.
    interaction_message = {
        'interaction_id': interaction_id,
        'originator_id': originator_id,
        'originator': originator,
        'originator_name': originator_name,
        'message_type':'accept',
        'message_record':f"hi. i am {originator} {originator_name}. how can i help you Mr. {interaction.originator} {interaction.originator_name}",
        'content_type':input_dict.get('content_type','text'),
        'format':input_dict.get('format',''),
        'application_name': input_dict.get('application_name'),
        'geolocation_lat': input_dict.get('geolocation_lat'),
        'geolocation_lon': input_dict.get('geolocation_lon'),
    }
    message = dbsession.insert(dbmodel.INTERACTION_MESSAGE, interaction_message, auto_commit=True, caller_area=_process_call_area)
    if not message:
        msg = f'interaction message add failed'
        log_process_message('', 'error', msg, **_process_call_area)
    time_start = interaction.row_timestamp
    time_end = datetime.datetime.utcnow()
    diff = time_end - time_start
    duration = diff.days * 24 * 60 * 60 + diff.seconds
    # Activate the interaction with the accepting party as corresponder.
    interaction_rec = interaction.to_dict()
    interaction_rec.update({
        'corresponder': originator,
        'corresponder_id': originator_id,
        'corresponder_name': originator_name,
        'status': 'Active',
        'last_usage_timestamp': datetime.datetime.utcnow(),
        'accept_geolocation_lat':input_dict.get('geolocation_lat'),
        'accept_geolocation_lon':input_dict.get('geolocation_lon'),
    })
    interaction = dbsession.update(dbmodel.INTERACTION, interaction_rec, auto_commit=True, caller_area=_process_call_area)
    if not interaction:
        msg = f'interaction accept failed'
        api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name}
        log_process_message('', 'error', msg, **_process_call_area)
        return api_result
    #step-6: result
    interaction_rec = interaction.to_dict()
    msg=f'OK. interaction accepted'
    api_result = {'api_status': 'success', 'api_message': msg, 'interaction_id': interaction.interaction_id, 'api_data': interaction_rec, 'api_action': _api_action.upper(), 'api_name': _api_name}
    log_process_finish(_api_msgID, api_result, **_process_call_area)
    return api_result
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def dbapi_interaction_finish(dbsession, input_dict, caller_area={}):
    """Finish an 'Active' interaction: record a 'finish' message and mark the
    INTERACTION row 'completed' with its total duration in seconds.

    Either party of the interaction (originator or corresponder) may finish it.
    """
    _api_name = "dbapi_interaction_finish"
    _api_entity = 'INTERACTION'
    _api_action = 'FINISH'
    _api_msgID = set_msgID(_api_name, _api_action, _api_entity)
    # Standard per-API process bookkeeping (identity, debug level, call area).
    _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,}
    _process_adapters_kwargs = {'dbsession': dbsession}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    log_process_start(_api_msgID,**_process_call_area)
    log_process_input('', 'input_dict', input_dict,**_process_call_area)
    log_process_input('', 'caller_area', caller_area,**_process_call_area)
    interaction_id = input_dict.get('interaction_id')
    if not interaction_id:
        msg = f'interaction not defined'
        log_process_message('', 'error', msg, **_process_call_area)
        api_result = {'api_status': 'error', 'api_message': msg}
= {'api_status': 'error', 'api_message': msg} log_process_finish(_api_msgID, api_result, **_process_call_area) return interaction = dbsession.get(dbmodel.INTERACTION, {'interaction_id':interaction_id}, caller_area=_process_call_area) if not interaction: msg = f'interaction not found' api_result = {'api_status': 'error', 'api_message': msg} log_process_finish(_api_msgID, api_result, **_process_call_area) return if not interaction.status=='Active': msg = f'interaction not Active' api_result = {'api_status': 'error', 'api_message': msg} log_process_finish(_api_msgID, api_result, **_process_call_area) return #//////////////////////////////////////// originator = None originator_id = None originator_name = None #//////////////////////////////////////// #step-1: originator # originator_id = input_dict.get('originator_id') # if originator_id: # xid = dbsession.get(dbmodel.CLIENT, {'client_id':originator_id}, caller_area=_process_call_area) # if xid: # originator='client' # originator_name = xid.email # else: # xid = dbsession.get(dbmodel.POINT_OF_SALE, {'pointofsale_id':originator_id}, caller_area=_process_call_area) # if xid: # originator='pointofsale' # originator_name = xid.name # else: # msg = f'originator not valid' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # xoriginator = input_dict.get('originator') # if xoriginator and not xoriginator == originator: # msg = f'originator_id not valid for originator {xoriginator}' # log_process_message('', 'error', msg, **_process_call_area) # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # else: # client_id = input_dict.get('client_id') # if client_id: # client=dbsession.get(dbmodel.CLIENT, {'client_id':client_id}, 
caller_area=_process_call_area) # if not client: # msg = f'client not found' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # originator='client' # originator_id=client_id # originator_name = client.email # else: # pointofsale_id = input_dict.get('pointofsale_id') # if pointofsale_id: # pointofsale=dbsession.get(dbmodel.POINT_OF_SALE, {'pointofsale_id':pointofsale_id}, caller_area=_process_call_area) # if not pointofsale: # msg = f'pointofsale not found' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # originator='pointofsale' # originator_id=pointofsale_id # originator_name = pointofsale.name # else: # msg = f'no pointofsale or client defined' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result (originator, originator_id, originator_name) = find_originator(dbsession, input_dict, _process_call_area) if not originator_id: msg = f'originator not defined (pointofsale or client or service_point)' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result xoriginator = input_dict.get('originator') if xoriginator and not xoriginator == originator: msg = f'originator_id not valid for originator {xoriginator}' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result # if not originator or not originator_id: # msg = f'originator not defined 
(pointofsale or client)' # log_process_message('', 'error', msg, **_process_call_area) # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result if not (originator_id == interaction.originator_id or originator_id == interaction.corresponder_id): msg = f'invalid originator [{originator_name}] for interaction [{interaction.interaction_id}]' log_process_message('', 'error', msg, **_process_call_area) api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result input_dict.update({'originator': originator}) input_dict.update({'originator_id': originator_id}) input_dict.update({'originator_name': originator_name}) msg=f'originator: [{originator}] [[{originator_name}]]' log_process_message('', 'success', msg, **_process_call_area) interaction_message = { 'interaction_id': interaction_id, 'originator_id': input_dict.get('originator_id', ''), 'originator': input_dict.get('originator', ''), 'originator_name': input_dict.get('originator_name', ''), 'content_type': input_dict.get('content_type', 'text'), 'format': input_dict.get('format', ''), 'application_name': input_dict.get('application_name'), 'geolocation_lat': input_dict.get('geolocation_lat'), 'geolocation_lon': input_dict.get('geolocation_lon'), 'message_type': 'finish', 'message_record':f"goodbye. 
Thank you for interacting with us.{input_dict.get('originator')} {input_dict.get('originator_name')}", } message = dbsession.insert(dbmodel.INTERACTION_MESSAGE, interaction_message, auto_commit=True, caller_area=_process_call_area) if not message: msg = f'interaction message add failed' log_process_message('', 'error', msg, **_process_call_area) time_start = interaction.row_timestamp time_end = datetime.datetime.utcnow() diff = time_end - time_start duration = diff.days * 24 * 60 * 60 + diff.seconds interaction_rec = interaction.to_dict() interaction_rec.update({'status':'completed','completed_timestamp':time_end,'duration':duration}) interaction = dbsession.update(dbmodel.INTERACTION, interaction_rec, auto_commit=True, caller_area=_process_call_area) if not interaction: msg = f'interaction finish failed' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_message('', 'error', msg, **_process_call_area) return api_result #step-6: result interaction_rec = interaction.to_dict() msg=f'OK. 
interaction finish' api_result = {'api_status': 'success', 'api_message': msg, 'interaction_id': interaction.interaction_id, 'api_data': interaction_rec, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def dbapi_interaction_message_add(dbsession, input_dict, caller_area={}): _api_name = "dbapi_interaction_message_add" _api_entity = 'INTERACTION_MESSAGE' _api_action = 'ADD' _api_msgID = set_msgID(_api_name, _api_action, _api_entity) _process_identity_kwargs = {'type': 'api', 'module': module_id, 'name': _api_name, 'action': _api_action, 'entity': _api_entity, 'msgID': _api_msgID,} _process_adapters_kwargs = {'dbsession': dbsession} _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level':None} _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs) _process_debug_kwargs={'debug_level':_process_debug_level,'debug_files':_process_debug_files} _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs) _process_call_area = build_process_call_area(_process_signature, caller_area) log_process_start(_api_msgID,**_process_call_area) log_process_input('', 'input_dict', input_dict,**_process_call_area) log_process_input('', 'caller_area', caller_area,**_process_call_area) interaction_id = input_dict.get('interaction_id') if not interaction_id: msg = f'interaction not defined' log_process_message('', 'error', msg, **_process_call_area) api_result = {'api_status': 'error', 'api_message': msg} log_process_finish(_api_msgID, api_result, **_process_call_area) return interaction = 
dbsession.get(dbmodel.INTERACTION, {'interaction_id':interaction_id}, caller_area=_process_call_area) if not interaction: msg = f'interaction not found' api_result = {'api_status': 'error', 'api_message': msg} log_process_finish(_api_msgID, api_result, **_process_call_area) return if not interaction.status=='Active': msg = f'interaction not Active' api_result = {'api_status': 'error', 'api_message': msg} log_process_finish(_api_msgID, api_result, **_process_call_area) return #//////////////////////////////////////// originator = None originator_id = None originator_name = None #//////////////////////////////////////// #step-1: originator # originator_id = input_dict.get('originator_id') # if originator_id: # xid = dbsession.get(dbmodel.CLIENT, {'client_id':originator_id}, caller_area=_process_call_area) # if xid: # originator='client' # originator_name = xid.email # else: # xid = dbsession.get(dbmodel.POINT_OF_SALE, {'pointofsale_id':originator_id}, caller_area=_process_call_area) # if xid: # originator='pointofsale' # originator_name = xid.name # else: # msg = f'originator not valid' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # xoriginator = input_dict.get('originator') # if xoriginator and not xoriginator == originator: # msg = f'originator_id not valid for originator {xoriginator}' # log_process_message('', 'error', msg, **_process_call_area) # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # else: # client_id = input_dict.get('client_id') # if client_id: # client=dbsession.get(dbmodel.CLIENT, {'client_id':client_id}, caller_area=_process_call_area) # if not client: # msg = f'client not found' # api_result = {'api_status': 'error', 'api_message': msg, 
'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # originator='client' # originator_id=client_id # originator_name = client.email # else: # pointofsale_id = input_dict.get('pointofsale_id') # if pointofsale_id: # pointofsale=dbsession.get(dbmodel.POINT_OF_SALE, {'pointofsale_id':pointofsale_id}, caller_area=_process_call_area) # if not pointofsale: # msg = f'pointofsale not found' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # originator='pointofsale' # originator_id=pointofsale_id # originator_name = pointofsale.name # else: # msg = f'no pointofsale or client defined' # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result # if not originator or not originator_id: # msg = f'originator not defined (pointofsale or client)' # log_process_message('', 'error', msg, **_process_call_area) # api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} # log_process_finish(_api_msgID, api_result, **_process_call_area) # return api_result (originator, originator_id, originator_name) = find_originator(dbsession, input_dict, _process_call_area) if not originator_id: msg = f'originator not defined (pointofsale or client or service_point)' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result xoriginator = input_dict.get('originator') if xoriginator and not xoriginator == originator: msg = f'originator_id not valid for originator {xoriginator}' api_result = {'api_status': 'error', 
'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result input_dict.update({'originator': originator}) input_dict.update({'originator_id': originator_id}) input_dict.update({'originator_name': originator_name}) if not (originator_id == interaction.originator_id or originator_id == interaction.corresponder_id): msg = f'invalid originator [{originator_name}] for interaction [{interaction.interaction_id}]' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result msg=f'originator: [{originator}] [[{originator_name}]]' log_process_message('', 'success', msg, **_process_call_area) interaction_message = { 'interaction_id': interaction_id, 'originator_id': originator_id, 'originator': originator, 'originator_name': originator_name, 'message_type': input_dict.get('message_type', 'message'), 'message_record': input_dict.get('message_record', ''), 'content_type': input_dict.get('content_type', 'text'), 'format': input_dict.get('format', ''), 'application_name': input_dict.get('application_name'), 'geolocation_lat': input_dict.get('geolocation_lat'), 'geolocation_lon': input_dict.get('geolocation_lon'), } message = dbsession.insert(dbmodel.INTERACTION_MESSAGE, interaction_message, auto_commit=True, caller_area=_process_call_area) if not message: msg = f'interaction message add failed' api_result = {'api_status': 'error', 'api_message': msg, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #step-6: result message_record = message.to_dict() msg=f'OK. 
interaction message added' api_result = {'api_status': 'success', 'api_message': msg, 'interaction_message_id': message.interaction_message_id, 'api_data': message_record, 'api_action': _api_action.upper(), 'api_name': _api_name} log_process_finish(_api_msgID, api_result, **_process_call_area) return api_result #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def find_originator(dbsession,input_dict={},caller_area={}): originator = None originator_id = None originator_name = None originator_id = input_dict.get('originator_id') if originator_id: client = dbsession.get(dbmodel.CLIENT, {'client_id':originator_id}, caller_area=caller_area) if client: originator='client' originator_name = client.email else: pointofsale = dbsession.get(dbmodel.POINT_OF_SALE, {'pointofsale_id': originator_id}, caller_area=caller_area) if pointofsale: originator='pointofsale' originator_name = pointofsale.name else: service_point=dbsession.get(dbmodel.SERVICE_POINT, {'servicepoint_id':originator_id}, caller_area=caller_area) if service_point: originator='service_point' originator_id=service_point.service_point_id originator_name = service_point.name else: client_id = input_dict.get('client_id') if client_id: client=dbsession.get(dbmodel.CLIENT, {'client_id':client_id}, caller_area=caller_area) if client: originator='client' originator_id=client_id originator_name = client.email else: pointofsale_id = input_dict.get('pointofsale_id') if pointofsale_id: pointofsale=dbsession.get(dbmodel.POINT_OF_SALE, {'pointofsale_id':pointofsale_id}, caller_area=caller_area) if pointofsale: originator='pointofsale' originator_id=pointofsale_id originator_name = pointofsale.name else: servicepoint_id = input_dict.get('servicepoint_id') if servicepoint_id: service_point=dbsession.get(dbmodel.SERVICE_POINT, {'servicepoint_id':pointofsale_id}, caller_area=caller_area) if service_point: originator='service_point' 
originator_id=service_point.service_point_id originator_name = service_point.name return (originator, originator_id, originator_name) #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def find_corresponder(dbsession,input_dict={},caller_area={}): corresponder = None corresponder_id = None corresponder_name = None corresponder_id = input_dict.get('corresponder_id') if corresponder_id: client = dbsession.get(dbmodel.CLIENT, {'client_id':corresponder_id}, caller_area=caller_area) if client: corresponder='client' corresponder_name = client.email else: pointofsale = dbsession.get(dbmodel.POINT_OF_SALE, {'pointofsale_id': corresponder_id}, caller_area=caller_area) if pointofsale: corresponder='pointofsale' corresponder_name = pointofsale.name else: service_point=dbsession.get(dbmodel.SERVICE_POINT, {'servicepoint_id':corresponder_id}, caller_area=caller_area) if service_point: corresponder='service_point' corresponder_id=service_point.service_point_id corresponder_name = service_point.name return (corresponder, corresponder_id, corresponder_name) #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def set_msgID(api_name,api_action,api_entity): msgid=f"#C0#api #C9#{api_name}#C0# [{api_entity}]#C0# action [[{api_action.upper()}]]#C0#" return msgid #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def geoloc_to_integer(geoloc): try: d = decimal.Decimal(str(geoloc).replace(",", ".").strip()) except: d = 0 i = int(d * 1000000000) return i #:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: def integer_to_geoloc(i): 
    # Inverse of geoloc_to_integer: unscale the fixed-point integer back to a
    # Decimal coordinate; unparseable input maps to Decimal 0.
    try:
        d = decimal.Decimal(str(i))
    except:
        d = 0
    geoloc = d / 1000000000
    return geoloc
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# module initialization
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Load/refresh this module's configuration record on import.
module_configuration = retrieve_module_configuration(__file__, module_identityDictionary, module_configuration, print_enabled=consolelog_enabled, filelog_enabled=filelog_enabled, handle_as_init=False)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# (print_enabled, filelog_enabled, log_file, errors_file,consolelog_enabled)=get_globals_from_configuration(module_configuration)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Register every 'dbapi_'-prefixed function of this module as a database API,
# excluding the listed helpers, then persist and pair the configuration.
functions_ids=['dbapi_']
exclude_functions_ids = ['set_msgID', 'set_process_debug_level']
thisModuleObj = sys.modules[__name__]
module_configuration.update({'database_apis':[]})
module_configuration = add_apis_to_configuration('database_apis', module_configuration, thisModuleObj, functions_ids, exclude_functions_ids)
save_module_configuration(module_identityDictionary, module_configuration, print_enabled=consolelog_enabled, filelog_enabled=filelog_enabled)
thisApp.pair_module_configuration('database_apis',module_configuration)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# NOTE(review): save_module_configuration is called a second time here with the
# same arguments as above — confirm whether the duplicate call is intentional.
save_module_configuration(module_identityDictionary, module_configuration, print_enabled=consolelog_enabled, filelog_enabled=filelog_enabled)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# When module-level debugging is on, list the registered database APIs.
if get_module_debug_level(module_id) > 0:
    apis = thisApp.application_configuration.get('database_apis', {})
    for api_name in apis.keys():
        api_entry = apis.get(api_name)
        msg=f'module [[{module_id}]] database api [{api_name} [[[{api_entry}]]]'
        log_message(msg)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# (removed: commented-out debug snippet that printed import commands for the registered APIs)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
msg = f'database [ganimides] [[[[module [{module_id}] loaded]]]] with [[version {module_version}]]'
if thisApp.get_module_debug_level(module_id):
    log_message(msg)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# main
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if __name__ == '__main__':
    #tests/research
    print(__file__)
    # caller_area={'aaaa': '11111'}
    # print('0caller_area=', caller_area)
    # test_api(caller_area, call_level=-1)
    # print('4caller_area=', caller_area)
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# Build the transcription list files for the Swedish handwriting dataset.
#
#   words_swe.txt : one line per generated word image in the standard
#                   "<name> ok 123 123 123 123 123 ABC <label>" format
#                   (labels keep the newline from readlines()).
#   samples.txt   : same format, with empty labels, for the free-form samples.
#
# Fixes vs. the previous version:
#   * nested same-quote f-strings (f"...{f"{i+1:03}"}...") were a syntax error
#     before Python 3.12 and redundant — the format spec is applied directly;
#   * the 9 copy-pasted createdOrNot blocks are replaced by one suffix loop;
#   * the try/except FileNotFoundError around a pure string append was dead
#     code (appending an f-string can never raise it, so `offset` stayed 0);
#   * all files are opened with `with` so handles are closed deterministically.

# Morphological suffixes, in the exact order matching the createdOrNot flags.
SUFFIXES = ["", "di", "er", "ds", "dss", "es", "ess", "s", "ss"]

labels = []
for writer in ("Adina", "Christian", "Liza", "Sofia"):
    with open(f"Labels_swe/{writer}_labels.txt", "r", encoding="utf-8") as fh:
        labels.append(fh.readlines())

with open("Labels_swe/Scrab_labels.txt", "r", encoding="utf-8") as fh:
    labels_scrab = fh.readlines()

# One 0/1 flag per (writer, word, suffix) combination, in nesting order.
with open("createdOrNot.txt", "r") as fh:
    createOrNot = [int(number.strip()) for number in fh]

name_roots = ["swe-ad-ad", "swe-ch-ch", "swe-li-li", "swe-so-so"]
name_roots_scrab = ["aug-style_1-", "aug-style_2-", "aug-style_5-", "aug-style_7-"]

with open('words_swe.txt', 'w') as f:
    p = 0  # cursor into createOrNot
    for k in range(len(name_roots)):
        # assumes all four label files have the same length as the first — TODO confirm
        for i in range(len(labels[0])):
            base = f"{name_roots[k]}{i+1:03}_"
            filenames = []
            for suffix in SUFFIXES:
                if createOrNot[p]:
                    filenames.append(base + suffix)
                p += 1  # the flag cursor advances whether or not the image exists
            for fname in filenames:
                f.write(f"{fname} ok 123 123 123 123 123 ABC {labels[k][i]}")

    # Augmented ("Scrab") images: every style/index pair gets its label.
    for k in range(len(name_roots_scrab)):
        for i in range(len(labels_scrab)):
            f.write(f"{name_roots_scrab[k]}{i:05} ok 123 123 123 123 123 ABC {labels_scrab[i]}")

name_roots_samp = ["samples-ad", "samples-ch", "samples-er"]
sample_lengths = [33, 21, 34]
with open('samples.txt', 'w') as fs:
    for k in range(len(name_roots_samp)):
        for i in range(sample_lengths[k]):
            fs.write(f"{name_roots_samp[k]}-{i+1} ok 123 123 123 123 123 ABC \n")
# Build the transcription list files for the Swedish handwriting dataset
# (duplicate of the script above — consider keeping only one copy).
#
#   words_swe.txt : "<name> ok 123 123 123 123 123 ABC <label>" per word image.
#   samples.txt   : same format with empty labels for the free-form samples.
#
# Fixes vs. the previous version: the 9 copy-pasted createdOrNot blocks are a
# single suffix loop; the dead try/except FileNotFoundError around a string
# append (which can never raise, so `offset` was always 0) is removed; files
# are opened with `with`; the redundant nested f-string is inlined.

_WRITERS = ("Adina", "Christian", "Liza", "Sofia")
# Suffix order must match the flag order in createdOrNot.txt.
_SUFFIXES = ("", "di", "er", "ds", "dss", "es", "ess", "s", "ss")

labels = []
for _writer in _WRITERS:
    with open(f"Labels_swe/{_writer}_labels.txt", "r", encoding="utf-8") as _fh:
        labels.append(_fh.readlines())

with open("Labels_swe/Scrab_labels.txt", "r", encoding="utf-8") as _fh:
    labels_scrab = _fh.readlines()

with open("createdOrNot.txt", "r") as _fh:
    createOrNot = [int(line.strip()) for line in _fh]

name_roots = ["swe-ad-ad", "swe-ch-ch", "swe-li-li", "swe-so-so"]
name_roots_scrab = ["aug-style_1-", "aug-style_2-", "aug-style_5-", "aug-style_7-"]

with open('words_swe.txt', 'w') as f:
    flag_idx = 0  # walks createOrNot: one flag per (writer, word, suffix)
    for k, root in enumerate(name_roots):
        # assumes every writer's label file matches the first one's length — TODO confirm
        for i in range(len(labels[0])):
            stem = f"{root}{i+1:03}_"
            for suffix in _SUFFIXES:
                if createOrNot[flag_idx]:
                    f.write(f"{stem}{suffix} ok 123 123 123 123 123 ABC {labels[k][i]}")
                flag_idx += 1  # advance even when the image was not created

    # Augmented ("Scrab") images: one line per style/index pair.
    for root in name_roots_scrab:
        for i, label in enumerate(labels_scrab):
            f.write(f"{root}{i:05} ok 123 123 123 123 123 ABC {label}")

name_roots_samp = ["samples-ad", "samples-ch", "samples-er"]
sample_lengths = [33, 21, 34]
with open('samples.txt', 'w') as fs:
    for root, count in zip(name_roots_samp, sample_lengths):
        for i in range(count):
            fs.write(f"{root}-{i+1} ok 123 123 123 123 123 ABC \n")
# Test suite for pydantic v1 field validators (`@validator`) and model-level
# validators (`@root_validator`) — appears to mirror pydantic's own
# tests/test_validators.py; verify against the upstream version in use.
from collections import deque
from datetime import datetime
from enum import Enum
from itertools import product
from typing import Dict, List, Optional, Tuple, Union

import pytest
from typing_extensions import Literal

from pydantic import BaseModel, ConfigError, Extra, Field, ValidationError, errors, validator
from pydantic.class_validators import make_generic_validator, root_validator


# --- basic field validation -------------------------------------------------

def test_simple():
    class Model(BaseModel):
        a: str

        @validator('a')
        def check_a(cls, v):
            if 'foobar' not in v:
                raise ValueError('"foobar" not found in a')
            return v

    assert Model(a='this is foobar good').a == 'this is foobar good'

    with pytest.raises(ValidationError) as exc_info:
        Model(a='snap')
    assert exc_info.value.errors() == [{'loc': ('a',), 'msg': '"foobar" not found in a', 'type': 'value_error'}]


def test_int_validation():
    class Model(BaseModel):
        a: int

    with pytest.raises(ValidationError) as exc_info:
        Model(a='snap')
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
    ]
    assert Model(a=3).a == 3
    assert Model(a=True).a == 1
    assert Model(a=False).a == 0
    assert Model(a=4.5).a == 4


def test_frozenset_validation():
    class Model(BaseModel):
        a: frozenset

    with pytest.raises(ValidationError) as exc_info:
        Model(a='snap')
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': 'value is not a valid frozenset', 'type': 'type_error.frozenset'}
    ]
    assert Model(a={1, 2, 3}).a == frozenset({1, 2, 3})
    assert Model(a=frozenset({1, 2, 3})).a == frozenset({1, 2, 3})
    assert Model(a=[4, 5]).a == frozenset({4, 5})
    assert Model(a=(6,)).a == frozenset({6})


def test_deque_validation():
    class Model(BaseModel):
        a: deque

    with pytest.raises(ValidationError) as exc_info:
        Model(a='snap')
    assert exc_info.value.errors() == [{'loc': ('a',), 'msg': 'value is not a valid deque', 'type': 'type_error.deque'}]
    assert Model(a={1, 2, 3}).a == deque([1, 2, 3])
    assert Model(a=deque({1, 2, 3})).a == deque([1, 2, 3])
    assert Model(a=[4, 5]).a == deque([4, 5])
    assert Model(a=(6,)).a == deque([6])


def test_validate_whole():
    class Model(BaseModel):
        a: List[int]

        @validator('a', pre=True)
        def check_a1(cls, v):
            v.append('123')
            return v

        @validator('a')
        def check_a2(cls, v):
            v.append(456)
            return v

    assert Model(a=[1, 2]).a == [1, 2, 123, 456]


def test_validate_kwargs():
    class Model(BaseModel):
        b: int
        a: List[int]

        @validator('a', each_item=True)
        def check_a1(cls, v, values, **kwargs):
            return v + values['b']

    assert Model(a=[1, 2], b=6).a == [7, 8]


def test_validate_pre_error():
    calls = []

    class Model(BaseModel):
        a: List[int]

        @validator('a', pre=True)
        def check_a1(cls, v):
            calls.append(f'check_a1 {v}')
            if 1 in v:
                raise ValueError('a1 broken')
            v[0] += 1
            return v

        @validator('a')
        def check_a2(cls, v):
            calls.append(f'check_a2 {v}')
            if 10 in v:
                raise ValueError('a2 broken')
            return v

    assert Model(a=[3, 8]).a == [4, 8]
    assert calls == ['check_a1 [3, 8]', 'check_a2 [4, 8]']

    calls = []
    with pytest.raises(ValidationError) as exc_info:
        Model(a=[1, 3])
    assert exc_info.value.errors() == [{'loc': ('a',), 'msg': 'a1 broken', 'type': 'value_error'}]
    assert calls == ['check_a1 [1, 3]']

    calls = []
    with pytest.raises(ValidationError) as exc_info:
        Model(a=[5, 10])
    assert exc_info.value.errors() == [{'loc': ('a',), 'msg': 'a2 broken', 'type': 'value_error'}]
    assert calls == ['check_a1 [5, 10]', 'check_a2 [6, 10]']


# --- validate_assignment behaviour ------------------------------------------

class ValidateAssignmentModel(BaseModel):
    a: int = 4
    b: str = ...
    c: int = 0

    @validator('b')
    def b_length(cls, v, values, **kwargs):
        if 'a' in values and len(v) < values['a']:
            raise ValueError('b too short')
        return v

    @validator('c')
    def double_c(cls, v):
        return v * 2

    class Config:
        validate_assignment = True
        extra = Extra.allow


def test_validating_assignment_ok():
    p = ValidateAssignmentModel(b='hello')
    assert p.b == 'hello'


def test_validating_assignment_fail():
    with pytest.raises(ValidationError):
        ValidateAssignmentModel(a=10, b='hello')

    p = ValidateAssignmentModel(b='hello')
    with pytest.raises(ValidationError):
        p.b = 'x'


def test_validating_assignment_value_change():
    p = ValidateAssignmentModel(b='hello', c=2)
    assert p.c == 4

    p = ValidateAssignmentModel(b='hello')
    assert p.c == 0
    p.c = 3
    assert p.c == 6


def test_validating_assignment_extra():
    p = ValidateAssignmentModel(b='hello', extra_field=1.23)
    assert p.extra_field == 1.23

    p = ValidateAssignmentModel(b='hello')
    p.extra_field = 1.23
    assert p.extra_field == 1.23
    p.extra_field = 'bye'
    assert p.extra_field == 'bye'


def test_validating_assignment_dict():
    with pytest.raises(ValidationError) as exc_info:
        ValidateAssignmentModel(a='x', b='xx')
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
    ]


def test_validating_assignment_values_dict():
    class ModelOne(BaseModel):
        a: int

    class ModelTwo(BaseModel):
        m: ModelOne
        b: int

        @validator('b')
        def validate_b(cls, b, values):
            if 'm' in values:
                return b + values['m'].a  # this fails if values['m'] is a dict
            else:
                return b

        class Config:
            validate_assignment = True

    model = ModelTwo(m=ModelOne(a=1), b=2)
    assert model.b == 3

    model.b = 3
    assert model.b == 4


def test_validate_multiple():
    # also test TypeError
    class Model(BaseModel):
        a: str
        b: str

        @validator('a', 'b')
        def check_a_and_b(cls, v, field, **kwargs):
            if len(v) < 4:
                raise TypeError(f'{field.alias} is too short')
            return v + 'x'

    assert Model(a='1234', b='5678').dict() == {'a': '1234x', 'b': '5678x'}

    with pytest.raises(ValidationError) as exc_info:
        Model(a='x', b='x')
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': 'a is too short', 'type': 'type_error'},
        {'loc': ('b',), 'msg': 'b is too short', 'type': 'type_error'},
    ]


def test_classmethod():
    class Model(BaseModel):
        a: str

        @validator('a')
        def check_a(cls, v):
            assert cls is Model
            return v

    m = Model(a='this is foobar good')
    assert m.a == 'this is foobar good'
    m.check_a('x')


# --- validator declaration errors -------------------------------------------

def test_duplicates():
    with pytest.raises(errors.ConfigError) as exc_info:

        class Model(BaseModel):
            a: str
            b: str

            @validator('a')
            def duplicate_name(cls, v):
                return v

            @validator('b')  # noqa
            def duplicate_name(cls, v):  # noqa
                return v

    assert str(exc_info.value) == (
        'duplicate validator function '
        '"tests.test_validators.test_duplicates.<locals>.Model.duplicate_name"; '
        'if this is intended, set `allow_reuse=True`'
    )


def test_use_bare():
    with pytest.raises(errors.ConfigError) as exc_info:

        class Model(BaseModel):
            a: str

            @validator
            def checker(cls, v):
                return v

    assert 'validators should be used with fields' in str(exc_info.value)


def test_use_no_fields():
    with pytest.raises(errors.ConfigError) as exc_info:

        class Model(BaseModel):
            a: str

            @validator()
            def checker(cls, v):
                return v

    assert 'validator with no fields specified' in str(exc_info.value)


def test_validate_always():
    check_calls = 0

    class Model(BaseModel):
        a: str = None

        @validator('a', pre=True, always=True)
        def check_a(cls, v):
            nonlocal check_calls
            check_calls += 1
            return v or 'xxx'

    assert Model().a == 'xxx'
    assert check_calls == 1
    assert Model(a='y').a == 'y'
    assert check_calls == 2


def test_validate_always_on_inheritance():
    check_calls = 0

    class ParentModel(BaseModel):
        a: str = None

    class Model(ParentModel):
        @validator('a', pre=True, always=True)
        def check_a(cls, v):
            nonlocal check_calls
            check_calls += 1
            return v or 'xxx'

    assert Model().a == 'xxx'
    assert check_calls == 1
    assert Model(a='y').a == 'y'
    assert check_calls == 2


def test_validate_not_always():
    check_calls = 0

    class Model(BaseModel):
        a: str = None

        @validator('a', pre=True)
        def check_a(cls, v):
            nonlocal check_calls
            check_calls += 1
            return v or 'xxx'

    assert Model().a is None
    assert check_calls == 0
    assert Model(a='y').a == 'y'
    assert check_calls == 1


def test_wildcard_validators():
    calls = []

    class Model(BaseModel):
        a: str
        b: int

        @validator('a')
        def check_a(cls, v, field, **kwargs):
            calls.append(('check_a', v, field.name))
            return v

        @validator('*')
        def check_all(cls, v, field, **kwargs):
            calls.append(('check_all', v, field.name))
            return v

    assert Model(a='abc', b='123').dict() == dict(a='abc', b=123)
    assert calls == [('check_a', 'abc', 'a'), ('check_all', 'abc', 'a'), ('check_all', 123, 'b')]


def test_wildcard_validator_error():
    class Model(BaseModel):
        a: str
        b: str

        @validator('*')
        def check_all(cls, v, field, **kwargs):
            if 'foobar' not in v:
                raise ValueError('"foobar" not found in a')
            return v

    assert Model(a='foobar a', b='foobar b').b == 'foobar b'

    with pytest.raises(ValidationError) as exc_info:
        Model(a='snap')
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': '"foobar" not found in a', 'type': 'value_error'},
        {'loc': ('b',), 'msg': 'field required', 'type': 'value_error.missing'},
    ]


def test_invalid_field():
    with pytest.raises(errors.ConfigError) as exc_info:

        class Model(BaseModel):
            a: str

            @validator('b')
            def check_b(cls, v):
                return v

    assert str(exc_info.value) == (
        "Validators defined with incorrect fields: check_b "  # noqa: Q000
        "(use check_fields=False if you're inheriting from the model and intended this)"
    )


# --- inheritance of validators ----------------------------------------------

def test_validate_child():
    class Parent(BaseModel):
        a: str

    class Child(Parent):
        @validator('a')
        def check_a(cls, v):
            if 'foobar' not in v:
                raise ValueError('"foobar" not found in a')
            return v

    assert Parent(a='this is not a child').a == 'this is not a child'
    assert Child(a='this is foobar good').a == 'this is foobar good'
    with pytest.raises(ValidationError):
        Child(a='snap')


def test_validate_child_extra():
    class Parent(BaseModel):
        a: str

        @validator('a')
        def check_a_one(cls, v):
            if 'foobar' not in v:
                raise ValueError('"foobar" not found in a')
            return v

    class Child(Parent):
        @validator('a')
        def check_a_two(cls, v):
            return v.upper()

    assert Parent(a='this is foobar good').a == 'this is foobar good'
    assert Child(a='this is foobar good').a == 'THIS IS FOOBAR GOOD'
    with pytest.raises(ValidationError):
        Child(a='snap')


def test_validate_child_all():
    class Parent(BaseModel):
        a: str

    class Child(Parent):
        @validator('*')
        def check_a(cls, v):
            if 'foobar' not in v:
                raise ValueError('"foobar" not found in a')
            return v

    assert Parent(a='this is not a child').a == 'this is not a child'
    assert Child(a='this is foobar good').a == 'this is foobar good'
    with pytest.raises(ValidationError):
        Child(a='snap')


def test_validate_parent():
    class Parent(BaseModel):
        a: str

        @validator('a')
        def check_a(cls, v):
            if 'foobar' not in v:
                raise ValueError('"foobar" not found in a')
            return v

    class Child(Parent):
        pass

    assert Parent(a='this is foobar good').a == 'this is foobar good'
    assert Child(a='this is foobar good').a == 'this is foobar good'
    with pytest.raises(ValidationError):
        Parent(a='snap')
    with pytest.raises(ValidationError):
        Child(a='snap')


def test_validate_parent_all():
    class Parent(BaseModel):
        a: str

        @validator('*')
        def check_a(cls, v):
            if 'foobar' not in v:
                raise ValueError('"foobar" not found in a')
            return v

    class Child(Parent):
        pass

    assert Parent(a='this is foobar good').a == 'this is foobar good'
    assert Child(a='this is foobar good').a == 'this is foobar good'
    with pytest.raises(ValidationError):
        Parent(a='snap')
    with pytest.raises(ValidationError):
        Child(a='snap')


def test_inheritance_keep():
    class Parent(BaseModel):
        a: int

        @validator('a')
        def add_to_a(cls, v):
            return v + 1

    class Child(Parent):
        pass

    assert Child(a=0).a == 1


def test_inheritance_replace():
    class Parent(BaseModel):
        a: int

        @validator('a')
        def add_to_a(cls, v):
            return v + 1

    class Child(Parent):
        @validator('a')
        def add_to_a(cls, v):
            return v + 5

    assert Child(a=0).a == 5


def test_inheritance_new():
    class Parent(BaseModel):
        a: int

        @validator('a')
        def add_one_to_a(cls, v):
            return v + 1

    class Child(Parent):
        @validator('a')
        def add_five_to_a(cls, v):
            return v + 5

    assert Child(a=0).a == 6


# --- each_item / key validation ---------------------------------------------

def test_validation_each_item():
    class Model(BaseModel):
        foobar: Dict[int, int]

        @validator('foobar', each_item=True)
        def check_foobar(cls, v):
            return v + 1

    assert Model(foobar={1: 1}).foobar == {1: 2}


def test_validation_each_item_one_sublevel():
    class Model(BaseModel):
        foobar: List[Tuple[int, int]]

        @validator('foobar', each_item=True)
        def check_foobar(cls, v: Tuple[int, int]) -> Tuple[int, int]:
            v1, v2 = v
            assert v1 == v2
            return v

    assert Model(foobar=[(1, 1), (2, 2)]).foobar == [(1, 1), (2, 2)]


def test_key_validation():
    class Model(BaseModel):
        foobar: Dict[int, int]

        @validator('foobar')
        def check_foobar(cls, value):
            return {k + 1: v + 1 for k, v in value.items()}

    assert Model(foobar={1: 1}).foobar == {2: 2}


def test_validator_always_optional():
    check_calls = 0

    class Model(BaseModel):
        a: Optional[str] = None

        @validator('a', pre=True, always=True)
        def check_a(cls, v):
            nonlocal check_calls
            check_calls += 1
            return v or 'default value'

    assert Model(a='y').a == 'y'
    assert check_calls == 1
    assert Model().a == 'default value'
    assert check_calls == 2


def test_validator_always_pre():
    check_calls = 0

    class Model(BaseModel):
        a: str = None

        @validator('a', always=True, pre=True)
        def check_a(cls, v):
            nonlocal check_calls
            check_calls += 1
            return v or 'default value'

    assert Model(a='y').a == 'y'
    assert Model().a == 'default value'
    assert check_calls == 2


def test_validator_always_post():
    class Model(BaseModel):
        a: str = None

        @validator('a', always=True)
        def check_a(cls, v):
            return v or 'default value'

    assert Model(a='y').a == 'y'
    assert Model().a == 'default value'


def test_validator_always_post_optional():
    class Model(BaseModel):
        a: Optional[str] = None

        @validator('a', always=True, pre=True)
        def check_a(cls, v):
            return v or 'default value'

    assert Model(a='y').a == 'y'
    assert Model().a == 'default value'


def test_datetime_validator():
    check_calls = 0

    class Model(BaseModel):
        d: datetime = None

        @validator('d', pre=True, always=True)
        def check_d(cls, v):
            nonlocal check_calls
            check_calls += 1
            return v or datetime(2032, 1, 1)

    assert Model(d='2023-01-01T00:00:00').d == datetime(2023, 1, 1)
    assert check_calls == 1
    assert Model().d == datetime(2032, 1, 1)
    assert check_calls == 2
    assert Model(d=datetime(2023, 1, 1)).d == datetime(2023, 1, 1)
    assert check_calls == 3


def test_pre_called_once():
    check_calls = 0

    class Model(BaseModel):
        a: Tuple[int, int, int]

        @validator('a', pre=True)
        def check_a(cls, v):
            nonlocal check_calls
            check_calls += 1
            return v

    assert Model(a=['1', '2', '3']).a == (1, 2, 3)
    assert check_calls == 1


# --- make_generic_validator signature handling ------------------------------

@pytest.mark.parametrize(
    'fields,result',
    [
        (['val'], '_v_'),
        (['foobar'], '_v_'),
        (['val', 'field'], '_v_,_field_'),
        (['val', 'config'], '_v_,_config_'),
        (['val', 'values'], '_v_,_values_'),
        (['val', 'field', 'config'], '_v_,_field_,_config_'),
        (['val', 'field', 'values'], '_v_,_field_,_values_'),
        (['val', 'config', 'values'], '_v_,_config_,_values_'),
        (['val', 'field', 'values', 'config'], '_v_,_field_,_values_,_config_'),
        (['cls', 'val'], '_cls_,_v_'),
        (['cls', 'foobar'], '_cls_,_v_'),
        (['cls', 'val', 'field'], '_cls_,_v_,_field_'),
        (['cls', 'val', 'config'], '_cls_,_v_,_config_'),
        (['cls', 'val', 'values'], '_cls_,_v_,_values_'),
        (['cls', 'val', 'field', 'config'], '_cls_,_v_,_field_,_config_'),
        (['cls', 'val', 'field', 'values'], '_cls_,_v_,_field_,_values_'),
        (['cls', 'val', 'config', 'values'], '_cls_,_v_,_config_,_values_'),
        (['cls', 'val', 'field', 'values', 'config'], '_cls_,_v_,_field_,_values_,_config_'),
    ],
)
def test_make_generic_validator(fields, result):
    # Builds e.g. `def testing_function(val, field): return val + "," + field`
    # (quoting restored here from a whitespace-mangled source — verify against upstream).
    exec(f"""def testing_function({", ".join(fields)}): return {' + "," + '.join(fields)}""")
    func = locals()['testing_function']
    validator = make_generic_validator(func)
    assert validator.__qualname__ == 'testing_function'
    assert validator.__name__ == 'testing_function'
    # args: cls, v, values, field, config
    assert validator('_cls_', '_v_', '_values_', '_field_', '_config_') == result


def test_make_generic_validator_kwargs():
    def test_validator(v, **kwargs):
        return ', '.join(f'{k}: {v}' for k, v in kwargs.items())

    validator = make_generic_validator(test_validator)
    assert validator.__name__ == 'test_validator'
    assert validator('_cls_', '_v_', '_vs_', '_f_', '_c_') == 'values: _vs_, field: _f_, config: _c_'


def test_make_generic_validator_invalid():
    def test_validator(v, foobar):
        return foobar

    with pytest.raises(ConfigError) as exc_info:
        make_generic_validator(test_validator)
    assert ': (v, foobar), should be: (value, values, config, field)' in str(exc_info.value)


def test_make_generic_validator_cls_kwargs():
    def test_validator(cls, v, **kwargs):
        return ', '.join(f'{k}: {v}' for k, v in kwargs.items())

    validator = make_generic_validator(test_validator)
    assert validator.__name__ == 'test_validator'
    assert validator('_cls_', '_v_', '_vs_', '_f_', '_c_') == 'values: _vs_, field: _f_, config: _c_'


def test_make_generic_validator_cls_invalid():
    def test_validator(cls, v, foobar):
        return foobar

    with pytest.raises(ConfigError) as exc_info:
        make_generic_validator(test_validator)
    assert ': (cls, v, foobar), should be: (cls, value, values, config, field)' in str(exc_info.value)


def test_make_generic_validator_self():
    def test_validator(self, v):
        return v

    with pytest.raises(ConfigError) as exc_info:
        make_generic_validator(test_validator)
    assert ': (self, v), "self" not permitted as first argument, should be: (cls, value' in str(exc_info.value)


def test_assert_raises_validation_error():
    class Model(BaseModel):
        a: str

        @validator('a')
        def check_a(cls, v):
            assert v == 'a', 'invalid a'
            return v

    Model(a='a')

    with pytest.raises(ValidationError) as exc_info:
        Model(a='snap')
    injected_by_pytest = "\nassert 'snap' == 'a'\n - a\n + snap"
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': f'invalid a{injected_by_pytest}', 'type': 'assertion_error'}
    ]


def test_whole():
    with pytest.warns(DeprecationWarning, match='The "whole" keyword argument is deprecated'):

        class Model(BaseModel):
            x: List[int]

            @validator('x', whole=True)
            def check_something(cls, v):
                return v


# --- root validators ---------------------------------------------------------

def test_root_validator():
    root_val_values = []

    class Model(BaseModel):
        a: int = 1
        b: str
        c: str

        @validator('b')
        def repeat_b(cls, v):
            return v * 2

        @root_validator
        def example_root_validator(cls, values):
            root_val_values.append(values)
            if 'snap' in values.get('b', ''):
                raise ValueError('foobar')
            return dict(values, b='changed')

        @root_validator
        def example_root_validator2(cls, values):
            root_val_values.append(values)
            if 'snap' in values.get('c', ''):
                raise ValueError('foobar2')
            return dict(values, c='changed')

    assert Model(a='123', b='bar', c='baz').dict() == {'a': 123, 'b': 'changed', 'c': 'changed'}

    with pytest.raises(ValidationError) as exc_info:
        Model(b='snap dragon', c='snap dragon2')
    assert exc_info.value.errors() == [
        {'loc': ('__root__',), 'msg': 'foobar', 'type': 'value_error'},
        {'loc': ('__root__',), 'msg': 'foobar2', 'type': 'value_error'},
    ]

    with pytest.raises(ValidationError) as exc_info:
        Model(a='broken', b='bar', c='baz')
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
    ]

    assert root_val_values == [
        {'a': 123, 'b': 'barbar', 'c': 'baz'},
        {'a': 123, 'b': 'changed', 'c': 'baz'},
        {'a': 1, 'b': 'snap dragonsnap dragon', 'c': 'snap dragon2'},
        {'a': 1, 'b': 'snap dragonsnap dragon', 'c': 'snap dragon2'},
        {'b': 'barbar', 'c': 'baz'},
        {'b': 'changed', 'c': 'baz'},
    ]


def test_root_validator_pre():
    root_val_values = []

    class Model(BaseModel):
        a: int = 1
        b: str

        @validator('b')
        def repeat_b(cls, v):
            return v * 2

        @root_validator(pre=True)
        def root_validator(cls, values):
            root_val_values.append(values)
            if 'snap' in values.get('b', ''):
                raise ValueError('foobar')
            return {'a': 42, 'b': 'changed'}

    assert Model(a='123', b='bar').dict() == {'a': 42, 'b': 'changedchanged'}

    with pytest.raises(ValidationError) as exc_info:
        Model(b='snap dragon')
    assert root_val_values == [{'a': '123', 'b': 'bar'}, {'b': 'snap dragon'}]
    assert exc_info.value.errors() == [{'loc': ('__root__',), 'msg': 'foobar', 'type': 'value_error'}]


def test_root_validator_repeat():
    with pytest.raises(errors.ConfigError, match='duplicate validator function'):

        class Model(BaseModel):
            a: int = 1

            @root_validator
            def root_validator_repeated(cls, values):
                return values

            @root_validator  # noqa: F811
            def root_validator_repeated(cls, values):  # noqa: F811
                return values


def test_root_validator_repeat2():
    with pytest.raises(errors.ConfigError, match='duplicate validator function'):

        class Model(BaseModel):
            a: int = 1

            @validator('a')
            def repeat_validator(cls, v):
                return v

            @root_validator(pre=True)  # noqa: F811
            def repeat_validator(cls, values):  # noqa: F811
                return values


def test_root_validator_self():
    with pytest.raises(
        errors.ConfigError, match=r'Invalid signature for root validator root_validator: \(self, values\)'
    ):

        class Model(BaseModel):
            a: int = 1

            @root_validator
            def root_validator(self, values):
                return values


def test_root_validator_extra():
    with pytest.raises(errors.ConfigError) as exc_info:

        class Model(BaseModel):
            a: int = 1

            @root_validator
            def root_validator(cls, values, another):
                return values

    assert str(exc_info.value) == (
        'Invalid signature for root validator root_validator: (cls, values, another), should be: (cls, values).'
    )


def test_root_validator_types():
    root_val_values = None

    class Model(BaseModel):
        a: int = 1
        b: str

        @root_validator
        def root_validator(cls, values):
            nonlocal root_val_values
            root_val_values = cls, values
            return values

        class Config:
            extra = Extra.allow

    assert Model(b='bar', c='wobble').dict() == {'a': 1, 'b': 'bar', 'c': 'wobble'}

    assert root_val_values == (Model, {'a': 1, 'b': 'bar', 'c': 'wobble'})


def test_root_validator_inheritance():
    calls = []

    class Parent(BaseModel):
        pass

        @root_validator
        def root_validator_parent(cls, values):
            calls.append(f'parent validator: {values}')
            return {'extra1': 1, **values}

    class Child(Parent):
        a: int

        @root_validator
        def root_validator_child(cls, values):
            calls.append(f'child validator: {values}')
            return {'extra2': 2, **values}

    assert len(Child.__post_root_validators__) == 2
    assert len(Child.__pre_root_validators__) == 0
    assert Child(a=123).dict() == {'extra2': 2, 'extra1': 1, 'a': 123}
    assert calls == ["parent validator: {'a': 123}", "child validator: {'extra1': 1, 'a': 123}"]


def test_root_validator_returns_none_exception():
    class Model(BaseModel):
        a: int = 1

        @root_validator
        def root_validator_repeated(cls, values):
            return None

    with pytest.raises(TypeError, match='Model values must be a dict'):
        Model()


# --- validator reuse (allow_reuse) ------------------------------------------

def reusable_validator(num):
    return num * 2


def test_reuse_global_validators():
    class Model(BaseModel):
        x: int
        y: int

        double_x = validator('x', allow_reuse=True)(reusable_validator)
        double_y = validator('y', allow_reuse=True)(reusable_validator)

    assert dict(Model(x=1, y=1)) == {'x': 2, 'y': 2}


def declare_with_reused_validators(include_root, allow_1, allow_2, allow_3):
    class Model(BaseModel):
        a: str
        b: str

        @validator('a', allow_reuse=allow_1)
        def duplicate_name(cls, v):
            return v

        @validator('b', allow_reuse=allow_2)  # noqa F811
        def duplicate_name(cls, v):  # noqa F811
            return v

        if include_root:

            @root_validator(allow_reuse=allow_3)  # noqa F811
            def duplicate_name(cls, values):  # noqa F811
                return values


@pytest.fixture
def reset_tracked_validators():
    from pydantic.class_validators import _FUNCS

    original_tracked_validators = set(_FUNCS)
    yield
    _FUNCS.clear()
    _FUNCS.update(original_tracked_validators)


@pytest.mark.parametrize('include_root,allow_1,allow_2,allow_3', product(*[[True, False]] * 4))
def test_allow_reuse(include_root, allow_1, allow_2, allow_3, reset_tracked_validators):
    duplication_count = int(not allow_1) + int(not allow_2) + int(include_root and not allow_3)
    if duplication_count > 1:
        with pytest.raises(ConfigError) as exc_info:
            declare_with_reused_validators(include_root, allow_1, allow_2, allow_3)
        assert str(exc_info.value).startswith('duplicate validator function')
    else:
        declare_with_reused_validators(include_root, allow_1, allow_2, allow_3)


@pytest.mark.parametrize('validator_classmethod,root_validator_classmethod', product(*[[True, False]] * 2))
def test_root_validator_classmethod(validator_classmethod, root_validator_classmethod, reset_tracked_validators):
    root_val_values = []

    class Model(BaseModel):
        a: int = 1
        b: str

        def repeat_b(cls, v):
            return v * 2

        if validator_classmethod:
            repeat_b = classmethod(repeat_b)
        repeat_b = validator('b')(repeat_b)

        def example_root_validator(cls, values):
            root_val_values.append(values)
            if 'snap' in values.get('b', ''):
                raise ValueError('foobar')
            return dict(values, b='changed')

        if root_validator_classmethod:
            example_root_validator = classmethod(example_root_validator)
        example_root_validator = root_validator(example_root_validator)

    assert Model(a='123', b='bar').dict() == {'a': 123, 'b': 'changed'}

    with pytest.raises(ValidationError) as exc_info:
        Model(b='snap dragon')
    assert exc_info.value.errors() == [{'loc': ('__root__',), 'msg': 'foobar', 'type': 'value_error'}]

    with pytest.raises(ValidationError) as exc_info:
        Model(a='broken', b='bar')
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
    ]

    assert root_val_values == [{'a': 123, 'b': 'barbar'}, {'a': 1, 'b': 'snap dragonsnap dragon'}, {'b': 'barbar'}]


def test_root_validator_skip_on_failure():
    a_called = False

    class ModelA(BaseModel):
        a: int

        @root_validator
        def example_root_validator(cls, values):
            nonlocal a_called
            a_called = True

    with pytest.raises(ValidationError):
        ModelA(a='a')
    assert a_called
    b_called = False

    class ModelB(BaseModel):
        a: int

        @root_validator(skip_on_failure=True)
        def example_root_validator(cls, values):
            nonlocal b_called
            b_called = True

    with pytest.raises(ValidationError):
        ModelB(a='a')
    assert not b_called


def test_assignment_validator_cls():
    validator_calls = 0

    class Model(BaseModel):
        name: str

        class Config:
            validate_assignment = True

        @validator('name')
        def check_foo(cls, value):
            nonlocal validator_calls
            validator_calls += 1
            assert cls == Model
            return value

    m = Model(name='hello')
    m.name = 'goodbye'
    assert validator_calls == 2


# --- Literal validation -------------------------------------------------------

def test_literal_validator():
    class Model(BaseModel):
        a: Literal['foo']

    Model(a='foo')

    with pytest.raises(ValidationError) as exc_info:
        Model(a='nope')
    assert exc_info.value.errors() == [
        {
            'loc': ('a',),
            'msg': "unexpected value; permitted: 'foo'",
            'type': 'value_error.const',
            'ctx': {'given': 'nope', 'permitted': ('foo',)},
        }
    ]


def test_literal_validator_str_enum():
    class Bar(str, Enum):
        FIZ = 'fiz'
        FUZ = 'fuz'

    class Foo(BaseModel):
        bar: Bar
        barfiz: Literal[Bar.FIZ]
        fizfuz: Literal[Bar.FIZ, Bar.FUZ]

    my_foo = Foo.parse_obj({'bar': 'fiz', 'barfiz': 'fiz', 'fizfuz': 'fiz'})
    assert my_foo.bar is Bar.FIZ
    assert my_foo.barfiz is Bar.FIZ
    assert my_foo.fizfuz is Bar.FIZ

    my_foo = Foo.parse_obj({'bar': 'fiz', 'barfiz': 'fiz', 'fizfuz': 'fuz'})
    assert my_foo.bar is Bar.FIZ
    assert my_foo.barfiz is Bar.FIZ
    assert my_foo.fizfuz is Bar.FUZ


def test_nested_literal_validator():
    L1 = Literal['foo']
    L2 = Literal['bar']

    class Model(BaseModel):
        a: Literal[L1, L2]

    Model(a='foo')

    with pytest.raises(ValidationError) as exc_info:
        Model(a='nope')
    assert exc_info.value.errors() == [
        {
            'loc': ('a',),
            'msg': "unexpected value; permitted: 'foo', 'bar'",
            'type': 'value_error.const',
            'ctx': {'given': 'nope', 'permitted': ('foo', 'bar')},
        }
    ]


def test_union_literal_with_constraints():
    class Model(BaseModel, validate_assignment=True):
        x: Union[Literal[42], Literal['pika']] = Field(allow_mutation=False)

    m = Model(x=42)
    with pytest.raises(TypeError):
        m.x += 1


def test_field_that_is_being_validated_is_excluded_from_validator_values(mocker):
    check_values = mocker.MagicMock()

    class Model(BaseModel):
        foo: str
        bar: str = Field(alias='pika')
        baz: str

        class Config:
            validate_assignment = True

        @validator('foo')
        def validate_foo(cls, v, values):
            check_values({**values})
            return v

        @validator('bar')
        def validate_bar(cls, v, values):
            check_values({**values})
            return v

    model = Model(foo='foo_value', pika='bar_value', baz='baz_value')
    check_values.reset_mock()

    assert list(dict(model).items()) == [('foo', 'foo_value'), ('bar', 'bar_value'), ('baz', 'baz_value')]

    model.foo = 'new_foo_value'
    check_values.assert_called_once_with({'bar': 'bar_value', 'baz': 'baz_value'})
    check_values.reset_mock()

    model.bar = 'new_bar_value'
    check_values.assert_called_once_with({'foo': 'new_foo_value', 'baz': 'baz_value'})

    # ensure field order is the same
    assert list(dict(model).items()) == [('foo', 'new_foo_value'), ('bar', 'new_bar_value'), ('baz', 'baz_value')]


def test_exceptions_in_field_validators_restore_original_field_value():
    class Model(BaseModel):
        foo: str

        class Config:
            validate_assignment = True

        @validator('foo')
        def validate_foo(cls, v):
            if v == 'raise_exception':
                raise RuntimeError('test error')
            return v

    model = Model(foo='foo')
    with pytest.raises(RuntimeError, match='test error'):
        model.foo = 'raise_exception'
    assert model.foo == 'foo'


def test_overridden_root_validators(mocker):
    validate_stub = mocker.stub(name='validate')

    class A(BaseModel):
        x: str

        @root_validator(pre=True)
        def pre_root(cls, values):
            validate_stub('A', 'pre')
            return values

        @root_validator(pre=False)
        def post_root(cls, values):
            validate_stub('A', 'post')
            return values

    class B(A):
        @root_validator(pre=True)
        def pre_root(cls, values):
            validate_stub('B', 'pre')
            return values

        @root_validator(pre=False)
        def post_root(cls, values):
            validate_stub('B', 'post')
            return values

    A(x='pika')
    assert validate_stub.call_args_list == [mocker.call('A', 'pre'), mocker.call('A', 'post')]

    validate_stub.reset_mock()
    B(x='pika')
    assert validate_stub.call_args_list == [mocker.call('B', 'pre'), mocker.call('B', 'post')]
# Tests for pydantic (v1) field validators and root validators.
# NOTE(review): reconstructed from whitespace-mangled source (newlines had been
# stripped); all tokens and runtime strings preserved byte-for-byte.
from collections import deque
from datetime import datetime
from enum import Enum
from itertools import product
from typing import Dict, List, Optional, Tuple, Union

import pytest
from typing_extensions import Literal

from pydantic import BaseModel, ConfigError, Extra, Field, ValidationError, errors, validator
from pydantic.class_validators import make_generic_validator, root_validator


def test_simple():
    class Model(BaseModel):
        a: str

        @validator('a')
        def check_a(cls, v):
            if 'foobar' not in v:
                raise ValueError('"foobar" not found in a')
            return v

    assert Model(a='this is foobar good').a == 'this is foobar good'

    with pytest.raises(ValidationError) as exc_info:
        Model(a='snap')
    assert exc_info.value.errors() == [{'loc': ('a',), 'msg': '"foobar" not found in a', 'type': 'value_error'}]


def test_int_validation():
    class Model(BaseModel):
        a: int

    with pytest.raises(ValidationError) as exc_info:
        Model(a='snap')
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
    ]
    assert Model(a=3).a == 3
    assert Model(a=True).a == 1
    assert Model(a=False).a == 0
    assert Model(a=4.5).a == 4


def test_frozenset_validation():
    class Model(BaseModel):
        a: frozenset

    with pytest.raises(ValidationError) as exc_info:
        Model(a='snap')
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': 'value is not a valid frozenset', 'type': 'type_error.frozenset'}
    ]
    assert Model(a={1, 2, 3}).a == frozenset({1, 2, 3})
    assert Model(a=frozenset({1, 2, 3})).a == frozenset({1, 2, 3})
    assert Model(a=[4, 5]).a == frozenset({4, 5})
    assert Model(a=(6,)).a == frozenset({6})


def test_deque_validation():
    class Model(BaseModel):
        a: deque

    with pytest.raises(ValidationError) as exc_info:
        Model(a='snap')
    assert exc_info.value.errors() == [{'loc': ('a',), 'msg': 'value is not a valid deque', 'type': 'type_error.deque'}]
    assert Model(a={1, 2, 3}).a == deque([1, 2, 3])
    assert Model(a=deque({1, 2, 3})).a == deque([1, 2, 3])
    assert Model(a=[4, 5]).a == deque([4, 5])
    assert Model(a=(6,)).a == deque([6])


def test_validate_whole():
    class Model(BaseModel):
        a: List[int]

        @validator('a', pre=True)
        def check_a1(cls, v):
            v.append('123')
            return v

        @validator('a')
        def check_a2(cls, v):
            v.append(456)
            return v

    assert Model(a=[1, 2]).a == [1, 2, 123, 456]


def test_validate_kwargs():
    class Model(BaseModel):
        b: int
        a: List[int]

        @validator('a', each_item=True)
        def check_a1(cls, v, values, **kwargs):
            return v + values['b']

    assert Model(a=[1, 2], b=6).a == [7, 8]


def test_validate_pre_error():
    calls = []

    class Model(BaseModel):
        a: List[int]

        @validator('a', pre=True)
        def check_a1(cls, v):
            calls.append(f'check_a1 {v}')
            if 1 in v:
                raise ValueError('a1 broken')
            v[0] += 1
            return v

        @validator('a')
        def check_a2(cls, v):
            calls.append(f'check_a2 {v}')
            if 10 in v:
                raise ValueError('a2 broken')
            return v

    assert Model(a=[3, 8]).a == [4, 8]
    assert calls == ['check_a1 [3, 8]', 'check_a2 [4, 8]']

    calls = []
    with pytest.raises(ValidationError) as exc_info:
        Model(a=[1, 3])
    assert exc_info.value.errors() == [{'loc': ('a',), 'msg': 'a1 broken', 'type': 'value_error'}]
    assert calls == ['check_a1 [1, 3]']

    calls = []
    with pytest.raises(ValidationError) as exc_info:
        Model(a=[5, 10])
    assert exc_info.value.errors() == [{'loc': ('a',), 'msg': 'a2 broken', 'type': 'value_error'}]
    assert calls == ['check_a1 [5, 10]', 'check_a2 [6, 10]']


class ValidateAssignmentModel(BaseModel):
    a: int = 4
    b: str = ...
    c: int = 0

    @validator('b')
    def b_length(cls, v, values, **kwargs):
        if 'a' in values and len(v) < values['a']:
            raise ValueError('b too short')
        return v

    @validator('c')
    def double_c(cls, v):
        return v * 2

    class Config:
        validate_assignment = True
        extra = Extra.allow


def test_validating_assignment_ok():
    p = ValidateAssignmentModel(b='hello')
    assert p.b == 'hello'


def test_validating_assignment_fail():
    with pytest.raises(ValidationError):
        ValidateAssignmentModel(a=10, b='hello')

    p = ValidateAssignmentModel(b='hello')
    with pytest.raises(ValidationError):
        p.b = 'x'


def test_validating_assignment_value_change():
    p = ValidateAssignmentModel(b='hello', c=2)
    assert p.c == 4

    p = ValidateAssignmentModel(b='hello')
    assert p.c == 0
    p.c = 3
    assert p.c == 6


def test_validating_assignment_extra():
    p = ValidateAssignmentModel(b='hello', extra_field=1.23)
    assert p.extra_field == 1.23

    p = ValidateAssignmentModel(b='hello')
    p.extra_field = 1.23
    assert p.extra_field == 1.23
    p.extra_field = 'bye'
    assert p.extra_field == 'bye'


def test_validating_assignment_dict():
    with pytest.raises(ValidationError) as exc_info:
        ValidateAssignmentModel(a='x', b='xx')
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
    ]


def test_validating_assignment_values_dict():
    class ModelOne(BaseModel):
        a: int

    class ModelTwo(BaseModel):
        m: ModelOne
        b: int

        @validator('b')
        def validate_b(cls, b, values):
            if 'm' in values:
                return b + values['m'].a  # this fails if values['m'] is a dict
            else:
                return b

        class Config:
            validate_assignment = True

    model = ModelTwo(m=ModelOne(a=1), b=2)
    assert model.b == 3
    model.b = 3
    assert model.b == 4


def test_validate_multiple():
    # also test TypeError
    class Model(BaseModel):
        a: str
        b: str

        @validator('a', 'b')
        def check_a_and_b(cls, v, field, **kwargs):
            if len(v) < 4:
                raise TypeError(f'{field.alias} is too short')
            return v + 'x'

    assert Model(a='1234', b='5678').dict() == {'a': '1234x', 'b': '5678x'}

    with pytest.raises(ValidationError) as exc_info:
        Model(a='x', b='x')
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': 'a is too short', 'type': 'type_error'},
        {'loc': ('b',), 'msg': 'b is too short', 'type': 'type_error'},
    ]


def test_classmethod():
    class Model(BaseModel):
        a: str

        @validator('a')
        def check_a(cls, v):
            assert cls is Model
            return v

    m = Model(a='this is foobar good')
    assert m.a == 'this is foobar good'
    m.check_a('x')


def test_duplicates():
    with pytest.raises(errors.ConfigError) as exc_info:

        class Model(BaseModel):
            a: str
            b: str

            @validator('a')
            def duplicate_name(cls, v):
                return v

            @validator('b')  # noqa
            def duplicate_name(cls, v):  # noqa
                return v

    assert str(exc_info.value) == (
        'duplicate validator function '
        '"tests.test_validators.test_duplicates.<locals>.Model.duplicate_name"; '
        'if this is intended, set `allow_reuse=True`'
    )


def test_use_bare():
    with pytest.raises(errors.ConfigError) as exc_info:

        class Model(BaseModel):
            a: str

            @validator
            def checker(cls, v):
                return v

    assert 'validators should be used with fields' in str(exc_info.value)


def test_use_no_fields():
    with pytest.raises(errors.ConfigError) as exc_info:

        class Model(BaseModel):
            a: str

            @validator()
            def checker(cls, v):
                return v

    assert 'validator with no fields specified' in str(exc_info.value)


def test_validate_always():
    check_calls = 0

    class Model(BaseModel):
        a: str = None

        @validator('a', pre=True, always=True)
        def check_a(cls, v):
            nonlocal check_calls
            check_calls += 1
            return v or 'xxx'

    assert Model().a == 'xxx'
    assert check_calls == 1
    assert Model(a='y').a == 'y'
    assert check_calls == 2


def test_validate_always_on_inheritance():
    check_calls = 0

    class ParentModel(BaseModel):
        a: str = None

    class Model(ParentModel):
        @validator('a', pre=True, always=True)
        def check_a(cls, v):
            nonlocal check_calls
            check_calls += 1
            return v or 'xxx'

    assert Model().a == 'xxx'
    assert check_calls == 1
    assert Model(a='y').a == 'y'
    assert check_calls == 2


def test_validate_not_always():
    check_calls = 0

    class Model(BaseModel):
        a: str = None

        @validator('a', pre=True)
        def check_a(cls, v):
            nonlocal check_calls
            check_calls += 1
            return v or 'xxx'

    assert Model().a is None
    assert check_calls == 0
    assert Model(a='y').a == 'y'
    assert check_calls == 1


def test_wildcard_validators():
    calls = []

    class Model(BaseModel):
        a: str
        b: int

        @validator('a')
        def check_a(cls, v, field, **kwargs):
            calls.append(('check_a', v, field.name))
            return v

        @validator('*')
        def check_all(cls, v, field, **kwargs):
            calls.append(('check_all', v, field.name))
            return v

    assert Model(a='abc', b='123').dict() == dict(a='abc', b=123)
    assert calls == [('check_a', 'abc', 'a'), ('check_all', 'abc', 'a'), ('check_all', 123, 'b')]


def test_wildcard_validator_error():
    class Model(BaseModel):
        a: str
        b: str

        @validator('*')
        def check_all(cls, v, field, **kwargs):
            if 'foobar' not in v:
                raise ValueError('"foobar" not found in a')
            return v

    assert Model(a='foobar a', b='foobar b').b == 'foobar b'

    with pytest.raises(ValidationError) as exc_info:
        Model(a='snap')
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': '"foobar" not found in a', 'type': 'value_error'},
        {'loc': ('b',), 'msg': 'field required', 'type': 'value_error.missing'},
    ]


def test_invalid_field():
    with pytest.raises(errors.ConfigError) as exc_info:

        class Model(BaseModel):
            a: str

            @validator('b')
            def check_b(cls, v):
                return v

    assert str(exc_info.value) == (
        "Validators defined with incorrect fields: check_b "  # noqa: Q000
        "(use check_fields=False if you're inheriting from the model and intended this)"
    )


def test_validate_child():
    class Parent(BaseModel):
        a: str

    class Child(Parent):
        @validator('a')
        def check_a(cls, v):
            if 'foobar' not in v:
                raise ValueError('"foobar" not found in a')
            return v

    assert Parent(a='this is not a child').a == 'this is not a child'
    assert Child(a='this is foobar good').a == 'this is foobar good'
    with pytest.raises(ValidationError):
        Child(a='snap')


def test_validate_child_extra():
    class Parent(BaseModel):
        a: str

        @validator('a')
        def check_a_one(cls, v):
            if 'foobar' not in v:
                raise ValueError('"foobar" not found in a')
            return v

    class Child(Parent):
        @validator('a')
        def check_a_two(cls, v):
            return v.upper()

    assert Parent(a='this is foobar good').a == 'this is foobar good'
    assert Child(a='this is foobar good').a == 'THIS IS FOOBAR GOOD'
    with pytest.raises(ValidationError):
        Child(a='snap')


def test_validate_child_all():
    class Parent(BaseModel):
        a: str

    class Child(Parent):
        @validator('*')
        def check_a(cls, v):
            if 'foobar' not in v:
                raise ValueError('"foobar" not found in a')
            return v

    assert Parent(a='this is not a child').a == 'this is not a child'
    assert Child(a='this is foobar good').a == 'this is foobar good'
    with pytest.raises(ValidationError):
        Child(a='snap')


def test_validate_parent():
    class Parent(BaseModel):
        a: str

        @validator('a')
        def check_a(cls, v):
            if 'foobar' not in v:
                raise ValueError('"foobar" not found in a')
            return v

    class Child(Parent):
        pass

    assert Parent(a='this is foobar good').a == 'this is foobar good'
    assert Child(a='this is foobar good').a == 'this is foobar good'
    with pytest.raises(ValidationError):
        Parent(a='snap')
    with pytest.raises(ValidationError):
        Child(a='snap')


def test_validate_parent_all():
    class Parent(BaseModel):
        a: str

        @validator('*')
        def check_a(cls, v):
            if 'foobar' not in v:
                raise ValueError('"foobar" not found in a')
            return v

    class Child(Parent):
        pass

    assert Parent(a='this is foobar good').a == 'this is foobar good'
    assert Child(a='this is foobar good').a == 'this is foobar good'
    with pytest.raises(ValidationError):
        Parent(a='snap')
    with pytest.raises(ValidationError):
        Child(a='snap')


def test_inheritance_keep():
    class Parent(BaseModel):
        a: int

        @validator('a')
        def add_to_a(cls, v):
            return v + 1

    class Child(Parent):
        pass

    assert Child(a=0).a == 1


def test_inheritance_replace():
    class Parent(BaseModel):
        a: int

        @validator('a')
        def add_to_a(cls, v):
            return v + 1

    class Child(Parent):
        @validator('a')
        def add_to_a(cls, v):
            return v + 5

    assert Child(a=0).a == 5


def test_inheritance_new():
    class Parent(BaseModel):
        a: int

        @validator('a')
        def add_one_to_a(cls, v):
            return v + 1

    class Child(Parent):
        @validator('a')
        def add_five_to_a(cls, v):
            return v + 5

    assert Child(a=0).a == 6


def test_validation_each_item():
    class Model(BaseModel):
        foobar: Dict[int, int]

        @validator('foobar', each_item=True)
        def check_foobar(cls, v):
            return v + 1

    assert Model(foobar={1: 1}).foobar == {1: 2}


def test_validation_each_item_one_sublevel():
    class Model(BaseModel):
        foobar: List[Tuple[int, int]]

        @validator('foobar', each_item=True)
        def check_foobar(cls, v: Tuple[int, int]) -> Tuple[int, int]:
            v1, v2 = v
            assert v1 == v2
            return v

    assert Model(foobar=[(1, 1), (2, 2)]).foobar == [(1, 1), (2, 2)]


def test_key_validation():
    class Model(BaseModel):
        foobar: Dict[int, int]

        @validator('foobar')
        def check_foobar(cls, value):
            return {k + 1: v + 1 for k, v in value.items()}

    assert Model(foobar={1: 1}).foobar == {2: 2}


def test_validator_always_optional():
    check_calls = 0

    class Model(BaseModel):
        a: Optional[str] = None

        @validator('a', pre=True, always=True)
        def check_a(cls, v):
            nonlocal check_calls
            check_calls += 1
            return v or 'default value'

    assert Model(a='y').a == 'y'
    assert check_calls == 1
    assert Model().a == 'default value'
    assert check_calls == 2


def test_validator_always_pre():
    check_calls = 0

    class Model(BaseModel):
        a: str = None

        @validator('a', always=True, pre=True)
        def check_a(cls, v):
            nonlocal check_calls
            check_calls += 1
            return v or 'default value'

    assert Model(a='y').a == 'y'
    assert Model().a == 'default value'
    assert check_calls == 2


def test_validator_always_post():
    class Model(BaseModel):
        a: str = None

        @validator('a', always=True)
        def check_a(cls, v):
            return v or 'default value'

    assert Model(a='y').a == 'y'
    assert Model().a == 'default value'


def test_validator_always_post_optional():
    class Model(BaseModel):
        a: Optional[str] = None

        @validator('a', always=True, pre=True)
        def check_a(cls, v):
            return v or 'default value'

    assert Model(a='y').a == 'y'
    assert Model().a == 'default value'


def test_datetime_validator():
    check_calls = 0

    class Model(BaseModel):
        d: datetime = None

        @validator('d', pre=True, always=True)
        def check_d(cls, v):
            nonlocal check_calls
            check_calls += 1
            return v or datetime(2032, 1, 1)

    assert Model(d='2023-01-01T00:00:00').d == datetime(2023, 1, 1)
    assert check_calls == 1
    assert Model().d == datetime(2032, 1, 1)
    assert check_calls == 2
    assert Model(d=datetime(2023, 1, 1)).d == datetime(2023, 1, 1)
    assert check_calls == 3


def test_pre_called_once():
    check_calls = 0

    class Model(BaseModel):
        a: Tuple[int, int, int]

        @validator('a', pre=True)
        def check_a(cls, v):
            nonlocal check_calls
            check_calls += 1
            return v

    assert Model(a=['1', '2', '3']).a == (1, 2, 3)
    assert check_calls == 1


@pytest.mark.parametrize(
    'fields,result',
    [
        (['val'], '_v_'),
        (['foobar'], '_v_'),
        (['val', 'field'], '_v_,_field_'),
        (['val', 'config'], '_v_,_config_'),
        (['val', 'values'], '_v_,_values_'),
        (['val', 'field', 'config'], '_v_,_field_,_config_'),
        (['val', 'field', 'values'], '_v_,_field_,_values_'),
        (['val', 'config', 'values'], '_v_,_config_,_values_'),
        (['val', 'field', 'values', 'config'], '_v_,_field_,_values_,_config_'),
        (['cls', 'val'], '_cls_,_v_'),
        (['cls', 'foobar'], '_cls_,_v_'),
        (['cls', 'val', 'field'], '_cls_,_v_,_field_'),
        (['cls', 'val', 'config'], '_cls_,_v_,_config_'),
        (['cls', 'val', 'values'], '_cls_,_v_,_values_'),
        (['cls', 'val', 'field', 'config'], '_cls_,_v_,_field_,_config_'),
        (['cls', 'val', 'field', 'values'], '_cls_,_v_,_field_,_values_'),
        (['cls', 'val', 'config', 'values'], '_cls_,_v_,_config_,_values_'),
        (['cls', 'val', 'field', 'values', 'config'], '_cls_,_v_,_field_,_values_,_config_'),
    ],
)
def test_make_generic_validator(fields, result):
    exec(f"""def testing_function({', '.join(fields)}): return {' + "," + '.join(fields)}""")
    func = locals()['testing_function']
    validator = make_generic_validator(func)
    assert validator.__qualname__ == 'testing_function'
    assert validator.__name__ == 'testing_function'
    # args: cls, v, values, field, config
    assert validator('_cls_', '_v_', '_values_', '_field_', '_config_') == result


def test_make_generic_validator_kwargs():
    def test_validator(v, **kwargs):
        return ', '.join(f'{k}: {v}' for k, v in kwargs.items())

    validator = make_generic_validator(test_validator)
    assert validator.__name__ == 'test_validator'
    assert validator('_cls_', '_v_', '_vs_', '_f_', '_c_') == 'values: _vs_, field: _f_, config: _c_'


def test_make_generic_validator_invalid():
    def test_validator(v, foobar):
        return foobar

    with pytest.raises(ConfigError) as exc_info:
        make_generic_validator(test_validator)
    assert ': (v, foobar), should be: (value, values, config, field)' in str(exc_info.value)


def test_make_generic_validator_cls_kwargs():
    def test_validator(cls, v, **kwargs):
        return ', '.join(f'{k}: {v}' for k, v in kwargs.items())

    validator = make_generic_validator(test_validator)
    assert validator.__name__ == 'test_validator'
    assert validator('_cls_', '_v_', '_vs_', '_f_', '_c_') == 'values: _vs_, field: _f_, config: _c_'


def test_make_generic_validator_cls_invalid():
    def test_validator(cls, v, foobar):
        return foobar

    with pytest.raises(ConfigError) as exc_info:
        make_generic_validator(test_validator)
    assert ': (cls, v, foobar), should be: (cls, value, values, config, field)' in str(exc_info.value)


def test_make_generic_validator_self():
    def test_validator(self, v):
        return v

    with pytest.raises(ConfigError) as exc_info:
        make_generic_validator(test_validator)
    assert ': (self, v), "self" not permitted as first argument, should be: (cls, value' in str(exc_info.value)


def test_assert_raises_validation_error():
    class Model(BaseModel):
        a: str

        @validator('a')
        def check_a(cls, v):
            assert v == 'a', 'invalid a'
            return v

    Model(a='a')

    with pytest.raises(ValidationError) as exc_info:
        Model(a='snap')
    injected_by_pytest = "\nassert 'snap' == 'a'\n - a\n + snap"
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': f'invalid a{injected_by_pytest}', 'type': 'assertion_error'}
    ]


def test_whole():
    with pytest.warns(DeprecationWarning, match='The "whole" keyword argument is deprecated'):

        class Model(BaseModel):
            x: List[int]

            @validator('x', whole=True)
            def check_something(cls, v):
                return v


def test_root_validator():
    root_val_values = []

    class Model(BaseModel):
        a: int = 1
        b: str
        c: str

        @validator('b')
        def repeat_b(cls, v):
            return v * 2

        @root_validator
        def example_root_validator(cls, values):
            root_val_values.append(values)
            if 'snap' in values.get('b', ''):
                raise ValueError('foobar')
            return dict(values, b='changed')

        @root_validator
        def example_root_validator2(cls, values):
            root_val_values.append(values)
            if 'snap' in values.get('c', ''):
                raise ValueError('foobar2')
            return dict(values, c='changed')

    assert Model(a='123', b='bar', c='baz').dict() == {'a': 123, 'b': 'changed', 'c': 'changed'}

    with pytest.raises(ValidationError) as exc_info:
        Model(b='snap dragon', c='snap dragon2')
    assert exc_info.value.errors() == [
        {'loc': ('__root__',), 'msg': 'foobar', 'type': 'value_error'},
        {'loc': ('__root__',), 'msg': 'foobar2', 'type': 'value_error'},
    ]

    with pytest.raises(ValidationError) as exc_info:
        Model(a='broken', b='bar', c='baz')
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
    ]

    assert root_val_values == [
        {'a': 123, 'b': 'barbar', 'c': 'baz'},
        {'a': 123, 'b': 'changed', 'c': 'baz'},
        {'a': 1, 'b': 'snap dragonsnap dragon', 'c': 'snap dragon2'},
        {'a': 1, 'b': 'snap dragonsnap dragon', 'c': 'snap dragon2'},
        {'b': 'barbar', 'c': 'baz'},
        {'b': 'changed', 'c': 'baz'},
    ]


def test_root_validator_pre():
    root_val_values = []

    class Model(BaseModel):
        a: int = 1
        b: str

        @validator('b')
        def repeat_b(cls, v):
            return v * 2

        @root_validator(pre=True)
        def root_validator(cls, values):
            root_val_values.append(values)
            if 'snap' in values.get('b', ''):
                raise ValueError('foobar')
            return {'a': 42, 'b': 'changed'}

    assert Model(a='123', b='bar').dict() == {'a': 42, 'b': 'changedchanged'}

    with pytest.raises(ValidationError) as exc_info:
        Model(b='snap dragon')
    assert root_val_values == [{'a': '123', 'b': 'bar'}, {'b': 'snap dragon'}]
    assert exc_info.value.errors() == [{'loc': ('__root__',), 'msg': 'foobar', 'type': 'value_error'}]


def test_root_validator_repeat():
    with pytest.raises(errors.ConfigError, match='duplicate validator function'):

        class Model(BaseModel):
            a: int = 1

            @root_validator
            def root_validator_repeated(cls, values):
                return values

            @root_validator  # noqa: F811
            def root_validator_repeated(cls, values):  # noqa: F811
                return values


def test_root_validator_repeat2():
    with pytest.raises(errors.ConfigError, match='duplicate validator function'):

        class Model(BaseModel):
            a: int = 1

            @validator('a')
            def repeat_validator(cls, v):
                return v

            @root_validator(pre=True)  # noqa: F811
            def repeat_validator(cls, values):  # noqa: F811
                return values


def test_root_validator_self():
    with pytest.raises(
        errors.ConfigError, match=r'Invalid signature for root validator root_validator: \(self, values\)'
    ):

        class Model(BaseModel):
            a: int = 1

            @root_validator
            def root_validator(self, values):
                return values


def test_root_validator_extra():
    with pytest.raises(errors.ConfigError) as exc_info:

        class Model(BaseModel):
            a: int = 1

            @root_validator
            def root_validator(cls, values, another):
                return values

    assert str(exc_info.value) == (
        'Invalid signature for root validator root_validator: (cls, values, another), should be: (cls, values).'
    )


def test_root_validator_types():
    root_val_values = None

    class Model(BaseModel):
        a: int = 1
        b: str

        @root_validator
        def root_validator(cls, values):
            nonlocal root_val_values
            root_val_values = cls, values
            return values

        class Config:
            extra = Extra.allow

    assert Model(b='bar', c='wobble').dict() == {'a': 1, 'b': 'bar', 'c': 'wobble'}

    assert root_val_values == (Model, {'a': 1, 'b': 'bar', 'c': 'wobble'})


def test_root_validator_inheritance():
    calls = []

    class Parent(BaseModel):
        pass

        @root_validator
        def root_validator_parent(cls, values):
            calls.append(f'parent validator: {values}')
            return {'extra1': 1, **values}

    class Child(Parent):
        a: int

        @root_validator
        def root_validator_child(cls, values):
            calls.append(f'child validator: {values}')
            return {'extra2': 2, **values}

    assert len(Child.__post_root_validators__) == 2
    assert len(Child.__pre_root_validators__) == 0
    assert Child(a=123).dict() == {'extra2': 2, 'extra1': 1, 'a': 123}
    assert calls == ["parent validator: {'a': 123}", "child validator: {'extra1': 1, 'a': 123}"]


def test_root_validator_returns_none_exception():
    class Model(BaseModel):
        a: int = 1

        @root_validator
        def root_validator_repeated(cls, values):
            return None

    with pytest.raises(TypeError, match='Model values must be a dict'):
        Model()


def reusable_validator(num):
    return num * 2


def test_reuse_global_validators():
    class Model(BaseModel):
        x: int
        y: int

        double_x = validator('x', allow_reuse=True)(reusable_validator)
        double_y = validator('y', allow_reuse=True)(reusable_validator)

    assert dict(Model(x=1, y=1)) == {'x': 2, 'y': 2}


def declare_with_reused_validators(include_root, allow_1, allow_2, allow_3):
    class Model(BaseModel):
        a: str
        b: str

        @validator('a', allow_reuse=allow_1)
        def duplicate_name(cls, v):
            return v

        @validator('b', allow_reuse=allow_2)  # noqa F811
        def duplicate_name(cls, v):  # noqa F811
            return v

        if include_root:

            @root_validator(allow_reuse=allow_3)  # noqa F811
            def duplicate_name(cls, values):  # noqa F811
                return values


@pytest.fixture
def reset_tracked_validators():
    # Snapshot and restore pydantic's global validator registry around each test.
    from pydantic.class_validators import _FUNCS

    original_tracked_validators = set(_FUNCS)
    yield
    _FUNCS.clear()
    _FUNCS.update(original_tracked_validators)


@pytest.mark.parametrize('include_root,allow_1,allow_2,allow_3', product(*[[True, False]] * 4))
def test_allow_reuse(include_root, allow_1, allow_2, allow_3, reset_tracked_validators):
    duplication_count = int(not allow_1) + int(not allow_2) + int(include_root and not allow_3)
    if duplication_count > 1:
        with pytest.raises(ConfigError) as exc_info:
            declare_with_reused_validators(include_root, allow_1, allow_2, allow_3)
        assert str(exc_info.value).startswith('duplicate validator function')
    else:
        declare_with_reused_validators(include_root, allow_1, allow_2, allow_3)


@pytest.mark.parametrize('validator_classmethod,root_validator_classmethod', product(*[[True, False]] * 2))
def test_root_validator_classmethod(validator_classmethod, root_validator_classmethod, reset_tracked_validators):
    root_val_values = []

    class Model(BaseModel):
        a: int = 1
        b: str

        def repeat_b(cls, v):
            return v * 2

        if validator_classmethod:
            repeat_b = classmethod(repeat_b)
        repeat_b = validator('b')(repeat_b)

        def example_root_validator(cls, values):
            root_val_values.append(values)
            if 'snap' in values.get('b', ''):
                raise ValueError('foobar')
            return dict(values, b='changed')

        if root_validator_classmethod:
            example_root_validator = classmethod(example_root_validator)
        example_root_validator = root_validator(example_root_validator)

    assert Model(a='123', b='bar').dict() == {'a': 123, 'b': 'changed'}

    with pytest.raises(ValidationError) as exc_info:
        Model(b='snap dragon')
    assert exc_info.value.errors() == [{'loc': ('__root__',), 'msg': 'foobar', 'type': 'value_error'}]

    with pytest.raises(ValidationError) as exc_info:
        Model(a='broken', b='bar')
    assert exc_info.value.errors() == [
        {'loc': ('a',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
    ]

    assert root_val_values == [{'a': 123, 'b': 'barbar'}, {'a': 1, 'b': 'snap dragonsnap dragon'}, {'b': 'barbar'}]


def test_root_validator_skip_on_failure():
    a_called = False

    class ModelA(BaseModel):
        a: int

        @root_validator
        def example_root_validator(cls, values):
            nonlocal a_called
            a_called = True

    with pytest.raises(ValidationError):
        ModelA(a='a')
    assert a_called
    b_called = False

    class ModelB(BaseModel):
        a: int

        @root_validator(skip_on_failure=True)
        def example_root_validator(cls, values):
            nonlocal b_called
            b_called = True

    with pytest.raises(ValidationError):
        ModelB(a='a')
    assert not b_called


def test_assignment_validator_cls():
    validator_calls = 0

    class Model(BaseModel):
        name: str

        class Config:
            validate_assignment = True

        @validator('name')
        def check_foo(cls, value):
            nonlocal validator_calls
            validator_calls += 1
            assert cls == Model
            return value

    m = Model(name='hello')
    m.name = 'goodbye'
    assert validator_calls == 2


def test_literal_validator():
    class Model(BaseModel):
        a: Literal['foo']

    Model(a='foo')

    with pytest.raises(ValidationError) as exc_info:
        Model(a='nope')
    assert exc_info.value.errors() == [
        {
            'loc': ('a',),
            'msg': "unexpected value; permitted: 'foo'",
            'type': 'value_error.const',
            'ctx': {'given': 'nope', 'permitted': ('foo',)},
        }
    ]


def test_literal_validator_str_enum():
    class Bar(str, Enum):
        FIZ = 'fiz'
        FUZ = 'fuz'

    class Foo(BaseModel):
        bar: Bar
        barfiz: Literal[Bar.FIZ]
        fizfuz: Literal[Bar.FIZ, Bar.FUZ]

    my_foo = Foo.parse_obj({'bar': 'fiz', 'barfiz': 'fiz', 'fizfuz': 'fiz'})
    assert my_foo.bar is Bar.FIZ
    assert my_foo.barfiz is Bar.FIZ
    assert my_foo.fizfuz is Bar.FIZ

    my_foo = Foo.parse_obj({'bar': 'fiz', 'barfiz': 'fiz', 'fizfuz': 'fuz'})
    assert my_foo.bar is Bar.FIZ
    assert my_foo.barfiz is Bar.FIZ
    assert my_foo.fizfuz is Bar.FUZ


def test_nested_literal_validator():
    L1 = Literal['foo']
    L2 = Literal['bar']

    class Model(BaseModel):
        a: Literal[L1, L2]

    Model(a='foo')

    with pytest.raises(ValidationError) as exc_info:
        Model(a='nope')
    assert exc_info.value.errors() == [
        {
            'loc': ('a',),
            'msg': "unexpected value; permitted: 'foo', 'bar'",
            'type': 'value_error.const',
            'ctx': {'given': 'nope', 'permitted': ('foo', 'bar')},
        }
    ]


def test_union_literal_with_constraints():
    class Model(BaseModel, validate_assignment=True):
        x: Union[Literal[42], Literal['pika']] = Field(allow_mutation=False)

    m = Model(x=42)
    with pytest.raises(TypeError):
        m.x += 1


def test_field_that_is_being_validated_is_excluded_from_validator_values(mocker):
    check_values = mocker.MagicMock()

    class Model(BaseModel):
        foo: str
        bar: str = Field(alias='pika')
        baz: str

        class Config:
            validate_assignment = True

        @validator('foo')
        def validate_foo(cls, v, values):
            check_values({**values})
            return v

        @validator('bar')
        def validate_bar(cls, v, values):
            check_values({**values})
            return v

    model = Model(foo='foo_value', pika='bar_value', baz='baz_value')
    check_values.reset_mock()

    assert list(dict(model).items()) == [('foo', 'foo_value'), ('bar', 'bar_value'), ('baz', 'baz_value')]

    model.foo = 'new_foo_value'
    check_values.assert_called_once_with({'bar': 'bar_value', 'baz': 'baz_value'})
    check_values.reset_mock()

    model.bar = 'new_bar_value'
    check_values.assert_called_once_with({'foo': 'new_foo_value', 'baz': 'baz_value'})

    # ensure field order is the same
    assert list(dict(model).items()) == [('foo', 'new_foo_value'), ('bar', 'new_bar_value'), ('baz', 'baz_value')]


def test_exceptions_in_field_validators_restore_original_field_value():
    class Model(BaseModel):
        foo: str

        class Config:
            validate_assignment = True

        @validator('foo')
        def validate_foo(cls, v):
            if v == 'raise_exception':
                raise RuntimeError('test error')
            return v

    model = Model(foo='foo')
    with pytest.raises(RuntimeError, match='test error'):
        model.foo = 'raise_exception'
    assert model.foo == 'foo'


def test_overridden_root_validators(mocker):
    validate_stub = mocker.stub(name='validate')

    class A(BaseModel):
        x: str

        @root_validator(pre=True)
        def pre_root(cls, values):
            validate_stub('A', 'pre')
            return values

        @root_validator(pre=False)
        def post_root(cls, values):
            validate_stub('A', 'post')
            return values

    class B(A):
        @root_validator(pre=True)
        def pre_root(cls, values):
            validate_stub('B', 'pre')
            return values

        @root_validator(pre=False)
        def post_root(cls, values):
            validate_stub('B', 'post')
            return values

    A(x='pika')
    assert validate_stub.call_args_list == [mocker.call('A', 'pre'), mocker.call('A', 'post')]

    validate_stub.reset_mock()
    B(x='pika')
    assert validate_stub.call_args_list == [mocker.call('B', 'pre'), mocker.call('B', 'post')]
""" model the end-to-end Subaru-SCExAO optics system Here, we will add the basic functionality of the Subaru+SCExAO Telescope, including the primary, secondary, and AO188. The SCExAO system sits behind the AO188 instrument of Subaru, which is a 188-element AO system located at the Nasmyth focus (IR) of the telescope. AO188 is a basic 4f-type optical system with a 188 element circular DM. Proper will only simulate 2D square DM's, so we use a 14x14 square inserted in the middle of the collimated beam. We also artifically oversize the number of AO188 actuators to be a 16x16 array since we oversize the DM to the beam by one actuator on either side of the beam. This script is meant to override any Subaru/SCExAO-specific parameters specified in the user's params.py """ import numpy as np from inspect import getframeinfo, stack import proper from medis.params import iop, sp, ap, tp from medis.utils import dprint import medis.optics as opx import medis.aberrations as aber import medis.adaptive as ao import medis.atmosphere as atmos import medis.coronagraphy as cg ################################################################################################# ################################################################################################# ################################################################################################# # iop.update_testname('SCExAO-dummy-save') # Defining Subaru parameters # ---------------------------- # According to Iye-et.al.2004-Optical_Performance_of_Subaru:AstronSocJapan, the AO188 uses the IR-Cass secondary, # but then feeds it to the IR Nasmyth f/13.6 focusing arrangement. So instead of simulating the full Subaru system, # we can use the effective focal length at the Nasmyth focus, and simulate it as a single lens. 
# --------------------------------------------------------------------------
# Effective Nasmyth-focus telescope: Subaru primary + secondary collapsed
# into a single equivalent lens at the IR Nasmyth focus (see module header).
tp.d_nsmyth = 7.9716  # m pupil diameter
tp.fn_nsmyth = 13.612  # f# Nasmyth focus
tp.flen_nsmyth = tp.d_nsmyth * tp.fn_nsmyth  # m focal length
tp.dist_nsmyth_ao1 = tp.flen_nsmyth + 1.14  # m distance secondary to M1 of AO188 (hand-tuned, could update with
                                            # data from literature)

# Below are the actual dimensions of the Subaru telescope.
# --------------------------------
# tp.enterence_d = 8.2  # m diameter of primary
# tp.flen_primary = 15  # m focal length of primary
# tp.dist_pri_second = 12.652  # m distance primary -> secondary

# Secondary
tp.d_secondary = 1.265  # m diameter secondary, used for central obscuration
# tp.fn_secondary = 12.6

# Re-writing params terms in Subaru-units
# need this to accurately make atmospheric and aberration maps
tp.entrance_d = tp.d_nsmyth
tp.flen_primary = tp.flen_nsmyth

# ----------------------------
# AO188 DM
tp.act_woofer = 15  # approximately a 188 DM (14*14=169) but then we include +2 pixels because the dm map is oversized
                    # by 2 pixels around the edge of the array

# ----------------------------
# AO188 OAP1
# Parameters taken from "Design of the Subaru laser guide star adaptive optics module"
# Makoto Watanabe et. al. SPIE doi: 10.1117/12.551032
tp.d_ao1 = 0.20  # m diameter of AO1
tp.fl_ao1 = 1.201  # m focal length OAP1
tp.dist_ao1_dm = 1.345  # m distance OAP1 to DM

# ----------------------------
# AO188 OAP2
tp.dist_dm_ao2 = 2.511-tp.dist_ao1_dm  # m distance DM to OAP2
tp.d_ao2 = 0.2  # m diameter of AO2
tp.fl_ao2 = 1.201  # m focal length AO2
tp.dist_oap2_focus = 1.261  # m distance OAP2 to the AO188 output focus

# ------------------------------
# SCExAO
# These params aren't actually working, so just doing very basic, 4F optical systems until further notice
tp.d_tweeter = 0.051  # diameter of optics in SCExAO train are 2 inches=0.051 m
tp.act_tweeter = 50  # SCExAO actuators are 50x50=2500 actuators
tp.fl_SxOAPG = 0.255  # m focal length of Genera SCExAO lens (OAP1,3,4,5)
tp.fl_SxOAP2 = 0.519  # m focal length of SCExAO OAP 2
tp.d_SxOAPG = 0.051  # diameter of SCExAO OAP's

# tp.dist_cg_sl1 = tp.fl_SxOAPG + .000001  # m distance between AO188 focus and scexao lens1
tp.dist_SxOAP1_scexao = 0.1345  # m
tp.dist_scexao_sl2 = 0.2511 - tp.dist_SxOAP1_scexao  # m
tp.dist_sl2_focus = 0.1261  # m

# Per-surface description used by the aberration generator: each entry gives the
# aberration PSD values, clear aperture, focal length, and propagation distance
# for one optic in the train (names are matched by aber.add_aber's lens_name).
tp.lens_params = [{'aber_vals': [7.2e-17, 0.8, 3.1],
                   'diam': tp.entrance_d,
                   'fl': tp.flen_nsmyth,
                   'dist': tp.dist_nsmyth_ao1,
                   'name': 'effective-primary'},

                  {'aber_vals': [7.2e-17, 0.8, 3.1],
                   'diam': tp.d_ao1,
                   'fl': tp.fl_ao1,
                   'dist': tp.dist_ao1_dm,
                   'name': 'ao188-OAP1'},

                  {'aber_vals': [7.2e-17, 0.8, 3.1],
                   'diam': tp.d_ao2,
                   'fl': tp.fl_ao2,
                   'dist': tp.dist_oap2_focus,
                   'name': 'ao188-OAP2'},

                  {'aber_vals': [7.2e-17, 0.8, 3.1],
                   'diam': tp.d_SxOAPG,
                   'fl': tp.fl_SxOAPG,
                   'dist': tp.fl_SxOAPG,
                   'name': 'SxOAPG'},

                  {'aber_vals': [7.2e-17, 0.8, 3.1],
                   'diam': tp.d_SxOAPG,
                   'fl': tp.fl_SxOAP2,
                   'dist': tp.fl_SxOAP2,
                   'name': 'SxOAP2'}
                  ]

# ------------------------------
# Coronagraph
tp.cg_type = 'Gaussian'
tp.cg_size = 2  # physical size or lambda/D size
tp.cg_size_units = "l/D"  # "m" or "l/D"
# tp.fl_cg_lens = 0.1021  # m
tp.fl_cg_lens = tp.fl_SxOAPG
tp.lyot_size = 0.9  # units are in fraction of surface blocked
#################################################################################################
#################################################################################################
#################################################################################################

def Subaru_SCExAO(empty_lamda, grid_size, PASSVALUE):
    """
    Propagate an instantaneous complex E-field through Subaru, from the primary through SCExAO.

    This function is called a 'prescription' by proper.

    Uses PyPROPER3 to generate the complex E-field at the source, then propagates it through
    atmosphere, then telescope, to the focal plane. The AO simulation happens here.
    This does not include the observation of the wavefront by the detector.

    :param empty_lamda: placeholder wavelength argument required by the proper prescription
        interface (unused here; wavelengths come from the Wavefronts object)
    :param grid_size: placeholder grid-size argument required by proper (unused here)
    :param PASSVALUE: dict of per-call values; 'iter' (the timestep index) is read here
    :returns: (cpx_planes, sampling) — spectral cube at instantaneous time from focal_plane()
    """
    # print("Propagating Broadband Wavefront Through Subaru")

    # Initialize the Wavefront in Proper
    wfo = opx.Wavefronts(sp.debug)
    wfo.initialize_proper()

    # Atmosphere
    # atmos has only effect on phase delay, not intensity
    wfo.loop_collection(atmos.add_atmos, PASSVALUE['iter'], plane_name='atmosphere')

    # Defines aperture (baffle-before primary)
    # wfo.loop_collection(opx.add_obscurations, d_primary=tp.entrance_d, d_secondary=tp.d_secondary, legs_frac=0.05)
    wfo.loop_collection(opx.SubaruPupil, plane_name='SubaruPupil')
    wfo.loop_collection(proper.prop_circular_aperture,
                        **{'radius': tp.entrance_d / 2})  # clear inside, dark outside
    wfo.loop_collection(proper.prop_define_entrance, plane_name='entrance_pupil')  # normalizes abs intensity
    if ap.companion:
        # Must do this after all calls to prop_define_entrance
        wfo.loop_collection(opx.offset_companion)
        wfo.loop_collection(proper.prop_circular_aperture,
                            **{'radius': tp.entrance_d / 2})  # clear inside, dark outside

    # Test Sampling
    if sp.verbose:
        wfo.loop_collection(opx.check_sampling, PASSVALUE['iter'], "Telescope Aperture",
                            getframeinfo(stack()[0][0]), units='mm')
    # Testing Primary Focus (instead of propagating to focal plane)
    # wfo.loop_collection(opx.prop_pass_lens, tp.flen_nsmyth, tp.flen_nsmyth)  # test only going to prime focus

    ########################################
    # Subaru Propagation
    #######################################
    # Effective Primary
    # CPA from Effective Primary
    wfo.loop_collection(aber.add_aber, step=PASSVALUE['iter'], lens_name='effective-primary')  # high order
    wfo.loop_collection(aber.add_zern_ab, tp.zernike_orders,
                        aber.randomize_zern_values(tp.zernike_orders))  # low order
    wfo.loop_collection(opx.prop_pass_lens, tp.flen_nsmyth, tp.dist_nsmyth_ao1)

    ########################################
    # AO188 Propagation
    ########################################
    # # AO188-OAP1
    wfo.loop_collection(aber.add_aber, step=PASSVALUE['iter'], lens_name='ao188-OAP1')  # high order
    wfo.loop_collection(opx.prop_pass_lens, tp.fl_ao1, tp.dist_ao1_dm)

    # AO System
    if tp.use_ao:
        WFS_map = ao.open_loop_wfs(wfo)
        wfo.loop_collection(ao.deformable_mirror, WFS_map, PASSVALUE['iter'], apodize=True,
                            plane_name='woofer',
                            debug=sp.verbose)  # don't use PASSVALUE['WFS_map'] here because open loop
    # ------------------------------------------------
    wfo.loop_collection(proper.prop_propagate, tp.dist_dm_ao2)

    # AO188-OAP2
    wfo.loop_collection(aber.add_aber, step=PASSVALUE['iter'], lens_name='ao188-OAP2')  # high order CPA
    wfo.loop_collection(aber.add_zern_ab, tp.zernike_orders,
                        aber.randomize_zern_values(tp.zernike_orders)/2)  # low order CPA
    wfo.loop_collection(opx.prop_pass_lens, tp.fl_ao2, tp.dist_oap2_focus)

    ########################################
    # SCExAO
    # #######################################
    # SXExAO Reimaging 1
    wfo.loop_collection(aber.add_aber, step=PASSVALUE['iter'], lens_name='SxOAPG')  # high order CPA
    wfo.loop_collection(proper.prop_propagate, tp.fl_SxOAPG)  # from AO188 focus to S-OAP1
    wfo.loop_collection(opx.prop_pass_lens, tp.fl_SxOAPG, tp.fl_SxOAPG)  # from SxOAP1 to tweeter-DM

    # # AO System
    if tp.use_ao:
        # WFS_map = ao.open_loop_wfs(wfo)
        # NOTE(review): reuses the WFS_map computed at the woofer stage above —
        # if tp.use_ao is False at the woofer but True here this would raise NameError.
        wfo.loop_collection(ao.deformable_mirror, WFS_map, PASSVALUE['iter'], apodize=True,
                            plane_name='tweeter',
                            debug=sp.verbose)
    # ------------------------------------------------
    wfo.loop_collection(proper.prop_propagate, tp.fl_SxOAPG)  # from tweeter-DM to OAP2

    # SXExAO Reimaging 2
    wfo.loop_collection(aber.add_aber, step=PASSVALUE['iter'], lens_name='SxOAP2')  # high order NCPA
    wfo.loop_collection(aber.add_zern_ab, tp.zernike_orders,
                        aber.randomize_zern_values(tp.zernike_orders)/2)  # low order NCPA
    wfo.loop_collection(opx.prop_pass_lens, tp.fl_SxOAP2, tp.fl_SxOAP2,
                        plane_name='post-DM-focus')  # tp.dist_sl2_focus

    # wfo.loop_collection(opx.check_sampling, PASSVALUE['iter'], "post-DM-focus",
    #                     getframeinfo(stack()[0][0]), units='nm')

    # Coronagraph
    # settings should be put into tp, and are not implicitly passed here
    wfo.loop_collection(cg.coronagraph, occulter_mode=tp.cg_type, plane_name='coronagraph')

    ########################################
    # Focal Plane
    # #######################################
    # Check Sampling in focal plane
    # wfo.focal_plane fft-shifts wfo from Fourier Space (origin==lower left corner) to object space (origin==center)
    cpx_planes, sampling = wfo.focal_plane()
    if sp.verbose:
        wfo.loop_collection(opx.check_sampling, PASSVALUE['iter'], "focal plane",
                            getframeinfo(stack()[0][0]), units='nm')
        # opx.check_sampling(PASSVALUE['iter'], wfo, "focal plane", getframeinfo(stack()[0][0]), units='arcsec')

    if sp.verbose:
        # FIX: the original used double quotes inside a double-quoted f-string
        # (f"...{PASSVALUE["iter"]}"), a SyntaxError on Python < 3.12 and
        # inconsistent with the identical prescription elsewhere in the project.
        print(f"Finished datacube at timestep = {PASSVALUE['iter']}")

    return cpx_planes, sampling
""" model the end-to-end Subaru-SCExAO optics system Here, we will add the basic functionality of the Subaru+SCExAO Telescope, including the primary, secondary, and AO188. The SCExAO system sits behind the AO188 instrument of Subaru, which is a 188-element AO system located at the Nasmyth focus (IR) of the telescope. AO188 is a basic 4f-type optical system with a 188 element circular DM. Proper will only simulate 2D square DM's, so we use a 14x14 square inserted in the middle of the collimated beam. We also artifically oversize the number of AO188 actuators to be a 16x16 array since we oversize the DM to the beam by one actuator on either side of the beam. This script is meant to override any Subaru/SCExAO-specific parameters specified in the user's params.py """ import numpy as np from inspect import getframeinfo, stack import proper from medis.params import iop, sp, ap, tp from medis.utils import dprint import medis.optics as opx import medis.aberrations as aber import medis.adaptive as ao import medis.atmosphere as atmos import medis.coronagraphy as cg ################################################################################################# ################################################################################################# ################################################################################################# # iop.update_testname('SCExAO-dummy-save') # Defining Subaru parameters # ---------------------------- # According to Iye-et.al.2004-Optical_Performance_of_Subaru:AstronSocJapan, the AO188 uses the IR-Cass secondary, # but then feeds it to the IR Nasmyth f/13.6 focusing arrangement. So instead of simulating the full Subaru system, # we can use the effective focal length at the Nasmyth focus, and simulate it as a single lens. 
# --------------------------------------------------------------------------
# Effective Nasmyth-focus telescope: Subaru primary + secondary collapsed
# into a single equivalent lens at the IR Nasmyth focus (see module header).
tp.d_nsmyth = 7.9716  # m pupil diameter
tp.fn_nsmyth = 13.612  # f# Nasmyth focus
tp.flen_nsmyth = tp.d_nsmyth * tp.fn_nsmyth  # m focal length
tp.dist_nsmyth_ao1 = tp.flen_nsmyth + 1.14  # m distance secondary to M1 of AO188 (hand-tuned, could update with
                                            # data from literature)

# Below are the actual dimensions of the Subaru telescope.
# --------------------------------
# tp.enterence_d = 8.2  # m diameter of primary
# tp.flen_primary = 15  # m focal length of primary
# tp.dist_pri_second = 12.652  # m distance primary -> secondary

# Secondary
tp.d_secondary = 1.265  # m diameter secondary, used for central obscuration
# tp.fn_secondary = 12.6

# Re-writing params terms in Subaru-units
# need this to accurately make atmospheric and aberration maps
tp.entrance_d = tp.d_nsmyth
tp.flen_primary = tp.flen_nsmyth

# ----------------------------
# AO188 DM
tp.act_woofer = 15  # approximately a 188 DM (14*14=169) but then we include +2 pixels because the dm map is oversized
                    # by 2 pixels around the edge of the array

# ----------------------------
# AO188 OAP1
# Parameters taken from "Design of the Subaru laser guide star adaptive optics module"
# Makoto Watanabe et. al. SPIE doi: 10.1117/12.551032
tp.d_ao1 = 0.20  # m diameter of AO1
tp.fl_ao1 = 1.201  # m focal length OAP1
tp.dist_ao1_dm = 1.345  # m distance OAP1 to DM

# ----------------------------
# AO188 OAP2
tp.dist_dm_ao2 = 2.511-tp.dist_ao1_dm  # m distance DM to OAP2
tp.d_ao2 = 0.2  # m diameter of AO2
tp.fl_ao2 = 1.201  # m focal length AO2
tp.dist_oap2_focus = 1.261  # m distance OAP2 to the AO188 output focus

# ------------------------------
# SCExAO
# These params aren't actually working, so just doing very basic, 4F optical systems until further notice
tp.d_tweeter = 0.051  # diameter of optics in SCExAO train are 2 inches=0.051 m
tp.act_tweeter = 50  # SCExAO actuators are 50x50=2500 actuators
tp.fl_SxOAPG = 0.255  # m focal length of Genera SCExAO lens (OAP1,3,4,5)
tp.fl_SxOAP2 = 0.519  # m focal length of SCExAO OAP 2
tp.d_SxOAPG = 0.051  # diameter of SCExAO OAP's

# tp.dist_cg_sl1 = tp.fl_SxOAPG + .000001  # m distance between AO188 focus and scexao lens1
tp.dist_SxOAP1_scexao = 0.1345  # m
tp.dist_scexao_sl2 = 0.2511 - tp.dist_SxOAP1_scexao  # m
tp.dist_sl2_focus = 0.1261  # m

# Per-surface description used by the aberration generator: each entry gives the
# aberration PSD values, clear aperture, focal length, and propagation distance
# for one optic in the train (names are matched by aber.add_aber's lens_name).
tp.lens_params = [{'aber_vals': [7.2e-17, 0.8, 3.1],
                   'diam': tp.entrance_d,
                   'fl': tp.flen_nsmyth,
                   'dist': tp.dist_nsmyth_ao1,
                   'name': 'effective-primary'},

                  {'aber_vals': [7.2e-17, 0.8, 3.1],
                   'diam': tp.d_ao1,
                   'fl': tp.fl_ao1,
                   'dist': tp.dist_ao1_dm,
                   'name': 'ao188-OAP1'},

                  {'aber_vals': [7.2e-17, 0.8, 3.1],
                   'diam': tp.d_ao2,
                   'fl': tp.fl_ao2,
                   'dist': tp.dist_oap2_focus,
                   'name': 'ao188-OAP2'},

                  {'aber_vals': [7.2e-17, 0.8, 3.1],
                   'diam': tp.d_SxOAPG,
                   'fl': tp.fl_SxOAPG,
                   'dist': tp.fl_SxOAPG,
                   'name': 'SxOAPG'},

                  {'aber_vals': [7.2e-17, 0.8, 3.1],
                   'diam': tp.d_SxOAPG,
                   'fl': tp.fl_SxOAP2,
                   'dist': tp.fl_SxOAP2,
                   'name': 'SxOAP2'}
                  ]

# ------------------------------
# Coronagraph
tp.cg_type = 'Gaussian'
tp.cg_size = 2  # physical size or lambda/D size
tp.cg_size_units = "l/D"  # "m" or "l/D"
# tp.fl_cg_lens = 0.1021  # m
tp.fl_cg_lens = tp.fl_SxOAPG
tp.lyot_size = 0.9  # units are in fraction of surface blocked
#################################################################################################
#################################################################################################
#################################################################################################

def Subaru_SCExAO(empty_lamda, grid_size, PASSVALUE):
    """
    propagates instantaneous complex E-field thru Subaru from the primary through SCExAO

    this function is called a 'prescription' by proper

    uses PyPROPER3 to generate the complex E-field at the source, then propagates it through atmosphere,
        then telescope, to the focal plane
    the AO simulator happens here
    this does not include the observation of the wavefront by the detector

    :param empty_lamda: placeholder wavelength argument required by proper's prescription
        interface (unused here; wavelengths come from the Wavefronts object)
    :param grid_size: placeholder grid-size argument required by proper (unused here)
    :param PASSVALUE: dict of per-call values; only 'iter' (timestep index) is read here
    :returns spectral cube at instantaneous time in the focal_plane()
    """
    # print("Propagating Broadband Wavefront Through Subaru")

    # Initialize the Wavefront in Proper
    wfo = opx.Wavefronts(sp.debug)
    wfo.initialize_proper()

    # Atmosphere
    # atmos has only effect on phase delay, not intensity
    wfo.loop_collection(atmos.add_atmos, PASSVALUE['iter'], plane_name='atmosphere')

    # Defines aperture (baffle-before primary)
    # wfo.loop_collection(opx.add_obscurations, d_primary=tp.entrance_d, d_secondary=tp.d_secondary, legs_frac=0.05)
    wfo.loop_collection(opx.SubaruPupil, plane_name='SubaruPupil')
    wfo.loop_collection(proper.prop_circular_aperture,
                        **{'radius': tp.entrance_d / 2})  # clear inside, dark outside
    wfo.loop_collection(proper.prop_define_entrance, plane_name='entrance_pupil')  # normalizes abs intensity
    if ap.companion:
        # Must do this after all calls to prop_define_entrance
        wfo.loop_collection(opx.offset_companion)
        wfo.loop_collection(proper.prop_circular_aperture,
                            **{'radius': tp.entrance_d / 2})  # clear inside, dark outside

    # Test Sampling
    if sp.verbose:
        wfo.loop_collection(opx.check_sampling, PASSVALUE['iter'], "Telescope Aperture",
                            getframeinfo(stack()[0][0]), units='mm')
    # Testing Primary Focus (instead of propagating to focal plane)
    # wfo.loop_collection(opx.prop_pass_lens, tp.flen_nsmyth, tp.flen_nsmyth)  # test only going to prime focus

    ########################################
    # Subaru Propagation
    #######################################
    # Effective Primary
    # CPA from Effective Primary
    wfo.loop_collection(aber.add_aber, step=PASSVALUE['iter'], lens_name='effective-primary')  # high order
    wfo.loop_collection(aber.add_zern_ab, tp.zernike_orders,
                        aber.randomize_zern_values(tp.zernike_orders))  # low order
    wfo.loop_collection(opx.prop_pass_lens, tp.flen_nsmyth, tp.dist_nsmyth_ao1)

    ########################################
    # AO188 Propagation
    ########################################
    # # AO188-OAP1
    wfo.loop_collection(aber.add_aber, step=PASSVALUE['iter'], lens_name='ao188-OAP1')  # high order
    wfo.loop_collection(opx.prop_pass_lens, tp.fl_ao1, tp.dist_ao1_dm)

    # AO System
    if tp.use_ao:
        WFS_map = ao.open_loop_wfs(wfo)
        wfo.loop_collection(ao.deformable_mirror, WFS_map, PASSVALUE['iter'], apodize=True,
                            plane_name='woofer',
                            debug=sp.verbose)  # don't use PASSVALUE['WFS_map'] here because open loop
    # ------------------------------------------------
    wfo.loop_collection(proper.prop_propagate, tp.dist_dm_ao2)

    # AO188-OAP2
    wfo.loop_collection(aber.add_aber, step=PASSVALUE['iter'], lens_name='ao188-OAP2')  # high order CPA
    wfo.loop_collection(aber.add_zern_ab, tp.zernike_orders,
                        aber.randomize_zern_values(tp.zernike_orders)/2)  # low order CPA
    wfo.loop_collection(opx.prop_pass_lens, tp.fl_ao2, tp.dist_oap2_focus)

    ########################################
    # SCExAO
    # #######################################
    # SXExAO Reimaging 1
    wfo.loop_collection(aber.add_aber, step=PASSVALUE['iter'], lens_name='SxOAPG')  # high order CPA
    wfo.loop_collection(proper.prop_propagate, tp.fl_SxOAPG)  # from AO188 focus to S-OAP1
    wfo.loop_collection(opx.prop_pass_lens, tp.fl_SxOAPG, tp.fl_SxOAPG)  # from SxOAP1 to tweeter-DM

    # # AO System
    if tp.use_ao:
        # WFS_map = ao.open_loop_wfs(wfo)
        # NOTE(review): reuses WFS_map computed at the woofer stage above.
        wfo.loop_collection(ao.deformable_mirror, WFS_map, PASSVALUE['iter'], apodize=True,
                            plane_name='tweeter',
                            debug=sp.verbose)
    # ------------------------------------------------
    wfo.loop_collection(proper.prop_propagate, tp.fl_SxOAPG)  # from tweeter-DM to OAP2

    # SXExAO Reimaging 2
    wfo.loop_collection(aber.add_aber, step=PASSVALUE['iter'], lens_name='SxOAP2')  # high order NCPA
    wfo.loop_collection(aber.add_zern_ab, tp.zernike_orders,
                        aber.randomize_zern_values(tp.zernike_orders)/2)  # low order NCPA
    wfo.loop_collection(opx.prop_pass_lens, tp.fl_SxOAP2, tp.fl_SxOAP2,
                        plane_name='post-DM-focus')  # tp.dist_sl2_focus

    # wfo.loop_collection(opx.check_sampling, PASSVALUE['iter'], "post-DM-focus",
    #                     getframeinfo(stack()[0][0]), units='nm')

    # Coronagraph
    # settings should be put into tp, and are not implicitly passed here
    wfo.loop_collection(cg.coronagraph, occulter_mode=tp.cg_type, plane_name='coronagraph')

    ########################################
    # Focal Plane
    # #######################################
    # Check Sampling in focal plane
    # wfo.focal_plane fft-shifts wfo from Fourier Space (origin==lower left corner) to object space (origin==center)
    cpx_planes, sampling = wfo.focal_plane()
    if sp.verbose:
        wfo.loop_collection(opx.check_sampling, PASSVALUE['iter'], "focal plane",
                            getframeinfo(stack()[0][0]), units='nm')
        # opx.check_sampling(PASSVALUE['iter'], wfo, "focal plane", getframeinfo(stack()[0][0]), units='arcsec')

    if sp.verbose:
        print(f"Finished datacube at timestep = {PASSVALUE['iter']}")

    return cpx_planes, sampling
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Change log
# 12/12/18  Jesse Vig   Adapted to BERT model
# 12/19/18  Jesse Vig   Assorted cleanup. Changed orientation of attention matrices. Updated comments.
"""Module for postprocessing and displaying transformer attentions.

This module is designed to be called from an ipython notebook.
"""

import json
import os

import numpy as np
import IPython.display as display

vis_html = """
<span style="user-select:none">
  Layer: <select id="layer"></select>
  Attention: <select id="att_type">
    <option value="all">All</option>
    <option value="a">Sentence A self-attention</option>
    <option value="b">Sentence B self-attention</option>
    <option value="ab">Sentence A -> Sentence B</option>
    <option value="ba">Sentence B -> Sentence A</option>
    <option value="avg_aa">Head-averaged sentence A self-attention</option>
    <option value="up2k_aa">Reduced up-to-k sentence A self-attention</option>
  </select>
</span>
<div id='vis'></div>
"""

__location__ = os.path.realpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__)))
# FIX: use a context manager so the file handle is closed promptly
# (the original `open(...).read()` leaked the handle).
with open(os.path.join(__location__, 'attention.js')) as _vis_js_file:
    vis_js = _vis_js_file.read()


def show(tokens_a, tokens_b, attn, expt_params):
    """Displays attention visualization.

    Args:
        tokens_a: tokens in sentence A
        tokens_b: tokens in sentence B
        attn: attention weights, [num_layers, batch_size, num_heads, seq_len, seq_len]
        expt_params: a dictionary possibly containing the following keys

            token_groups: array of nonnegative integers indicating how tokens are to be
                grouped in the viz e.g. "The quick brown fox jumps over the lazy dog ."
                with [1,1,1,1,0,0,2,2,2,0] produces the target groups
                "The quick brown fox" and "the lazy dog".
                ONLY WORKS WITH A->A, B->A FOR NOW.
            attn_sources: array of indices of the tokens (in tokens_a) with attention
                distributions that we are interested in.
            attn_target_groups: array of indices corresponding to token group that the
                self-attention of the corresponding source should be focusing on, for the
                purpose of computing binary cross-entropy. Only allowed values are 1 and 2.

    Raises:
        ValueError: if only one of attn_sources / attn_target_groups is supplied.
    """
    params = expt_params.keys()
    if 'token_groups' in params:
        token_groups = expt_params['token_groups']
        # token_groups excludes the [CLS]/[SEP] positions, hence the -2.
        assert(len(token_groups) == len(tokens_a) - 2)
        assert(all(type(i) is int for i in token_groups))
        if 'attn_sources' in params and 'attn_target_groups' in params:
            attn_sources = expt_params['attn_sources']
            attn_target_groups = expt_params['attn_target_groups']
            # BCE computation requires exactly two target groups plus the "ignored" group 0.
            assert(set(token_groups) == set([0, 1, 2]))
            assert(len(attn_sources) == len(attn_target_groups))
            assert(set(attn_sources).issubset(set(range(len(tokens_a)))))
            assert(set(attn_target_groups).issubset(set([1, 2])))
        elif 'attn_sources' in params or 'attn_target_groups' in params:
            raise ValueError('Please provide both attn_sources and attn_target_groups, otherwise omit both of them.')
    attentions = _get_attentions(tokens_a, tokens_b, attn, expt_params)
    att_json = json.dumps(attentions)
    _show_attention(att_json)


def _show_attention(att_json):
    """Render the HTML scaffold and inject the attention JSON + d3 driver into the notebook."""
    display.display(display.HTML(vis_html))
    display.display(display.Javascript('window.attention = %s' % att_json))
    display.display(display.Javascript(vis_js))


def logmatmulexp(A, B):
    """Numerically stable log(exp(A) @ exp(B)) for A, B of shape [1, n, n].

    Subtracts per-row maxima before exponentiating (log-sum-exp trick) so the
    chained products in _get_attentions do not underflow.
    """
    max_A = np.max(A, -1, keepdims=True)
    max_B = np.max(B, -1, keepdims=True)
    C = np.matmul(np.exp(A - max_A), np.exp(B - max_B))
    np.log(C, out=C)
    C += max_A + np.transpose(max_B, (0, 2, 1))
    return C


def _get_attentions(tokens_a, tokens_b, attn, expt_params):
    """Compute representation of the attention to pass to the d3 visualization

    Args:
        tokens_a: tokens in sentence A
        tokens_b: tokens in sentence B
        attn: numpy array, attention [num_layers, batch_size, num_heads, seq_len, seq_len]
        expt_params: dictionary containing customizations for the viz, e.g. target groups
            and inputs for computing cross-entropy

    Returns:
        Dictionary of attention representations with the structure:
        {
            'all': Representations for showing all attentions at the same time. (source = AB, target = AB)
            'a': Sentence A self-attention (source = A, target = A)
            'b': Sentence B self-attention (source = B, target = B)
            'ab': Sentence A -> Sentence B attention (source = A, target = B)
            'ba': Sentence B -> Sentence A attention (source = B, target = A)
        }
        and each sub-dictionary has structure:
        {
            'att': list of inter attentions matrices, one for each layer. Each is of shape
                [num_heads, source_seq_len, target_seq_len]
            'top_text': list of source tokens, to be displayed on the left of the vis
            'bot_text': list of target tokens, to be displayed on the right of the vis
        }
    """
    all_attns = []
    a_attns = []
    b_attns = []
    ab_attns = []
    ba_attns = []
    slice_a = slice(0, len(tokens_a))  # Positions corresponding to sentence A in input
    slice_b = slice(len(tokens_a), len(tokens_a) + len(tokens_b))  # Position corresponding to sentence B in input
    avg_attns = []
    up2k_attns = []
    # up2k = np.expand_dims(np.identity(len(tokens_a)), 0)  # initialize accumulator for reduction operation
    log_up2k = None  # log-space accumulator of the layer-by-layer attention product
    tokens_a_grouped = None
    no_sep_slice = slice(1, len(tokens_a) - 1)  # for renormalization so viz is not dominated by [CLS], [SEP] attentions
    if 'token_groups' in expt_params.keys():
        token_groups = expt_params['token_groups']
        token_groups.insert(0, 0)  # add 0 for [CLS]
        token_groups.append(0)  # add 0 for [SEP]
        # Map each group id to the token indices belonging to it.
        d = {i: [idx for (idx, grp) in enumerate(token_groups) if grp == i]
             for i in set(token_groups)}
        tokens_a_grouped = []
        for grp, idx_list in d.items():
            if grp == 0:
                continue
            tokens_a_grouped.append(' '.join(tokens_a[idx] for idx in idx_list))
        print("Token groups:", list(enumerate(tokens_a_grouped, 1)))
    else:
        print('Number of tokens:', len(tokens_a))
        token_groups = None
    head_visual_scaling_factor = 1
    up2k_visual_scaling_factor = 1
    num_layers = len(attn)
    for layer in range(num_layers):
        layer_attn = attn[layer][0]  # Get layer attention (assume batch size = 1), shape = [num_heads, seq_len, seq_len]
        all_attns.append(layer_attn.tolist())  # Append AB->AB attention for layer, across all heads
        b_attns.append(layer_attn[:, slice_b, slice_b].tolist())  # Append B->B attention for layer, across all heads
        ab_attns.append(layer_attn[:, slice_a, slice_b].tolist())  # Append A->B attention for layer, across all heads
        aa_attn = layer_attn[:, slice_a, slice_a]  # keep only the a->a attentions
        aa_attn /= aa_attn.sum(axis=2, keepdims=True)  # renormalize axis 2 of aa_attn after slicing
        head_avg = np.mean(aa_attn, axis=0, keepdims=True)  # mean preserves normalization along axis 2
        # normalizer = head_avg[:, :, no_sep_slice].sum(axis=2, keepdims=True)
        # avg_attns.append((head_visual_scaling_factor * head_avg / normalizer).tolist())
        if log_up2k is None:
            log_up2k = np.log(head_avg)
        else:
            log_head_avg = np.log(head_avg)
            log_up2k = logmatmulexp(log_head_avg, log_up2k)  # more numerically stable than chaining matmuls
            # np.matmul(head_avg, up2k, out=up2k)
            # up2k /= up2k.sum(axis=2, keepdims=True)
        # normalizer = np.exp(log_up2k)[:, :, no_sep_slice].sum(axis=2, keepdims=True)
        # up2k_attns.append((up2k_visual_scaling_factor * np.exp(up2k) / normalizer).tolist())
        if token_groups is not None:
            # Collapse the target axis to one column per token group (group 0 dropped).
            a_attn_grouped = None
            ba_attn_grouped = None
            avg_attn_grouped = None
            up2k_attn_grouped = None
            for grp, idx_list in d.items():
                if grp == 0:  # group 0 only consists of ignored tokens
                    continue
                if a_attn_grouped is None:  # first iter
                    a_attn_grouped = layer_attn[:, slice_a, idx_list].sum(axis=2, keepdims=True)
                    ba_attn_grouped = layer_attn[:, slice_b, idx_list].sum(axis=2, keepdims=True)
                    avg_attn_grouped = head_avg[:, slice_a, idx_list].sum(axis=2, keepdims=True)
                    up2k_attn_grouped = np.exp(log_up2k)[:, slice_a, idx_list].sum(axis=2, keepdims=True)
                else:
                    a_attn_grouped = np.append(a_attn_grouped,
                                               layer_attn[:, slice_a, idx_list].sum(axis=2, keepdims=True),
                                               axis=2)
                    ba_attn_grouped = np.append(ba_attn_grouped,
                                                layer_attn[:, slice_b, idx_list].sum(axis=2, keepdims=True),
                                                axis=2)
                    avg_attn_grouped = np.append(avg_attn_grouped,
                                                 head_avg[:, slice_a, idx_list].sum(axis=2, keepdims=True),
                                                 axis=2)
                    up2k_attn_grouped = np.append(up2k_attn_grouped,
                                                  np.exp(log_up2k)[:, slice_a, idx_list].sum(axis=2, keepdims=True),
                                                  axis=2)
            a_attns.append(a_attn_grouped.tolist())  # Append A->A attention for layer, across all heads
            ba_attns.append(ba_attn_grouped.tolist())  # Append B->A attention for layer, across all heads
            normalizer = avg_attn_grouped.sum(axis=2, keepdims=True)
            avg_attns.append((head_visual_scaling_factor * avg_attn_grouped / normalizer).tolist())
            normalizer = up2k_attn_grouped.sum(axis=2, keepdims=True)
            up2k_attns.append((up2k_visual_scaling_factor * up2k_attn_grouped / normalizer).tolist())
        else:
            a_attns.append(layer_attn[:, slice_a, slice_a].tolist())  # Append A->A attention for layer, across all heads
            ba_attns.append(layer_attn[:, slice_b, slice_a].tolist())  # Append B->A attention for layer, across all heads
            normalizer = head_avg[:, :, no_sep_slice].sum(axis=2, keepdims=True)
            avg_attns.append((head_visual_scaling_factor * head_avg / normalizer).tolist())
            normalizer = np.exp(log_up2k)[:, :, no_sep_slice].sum(axis=2, keepdims=True)
            up2k_attns.append((up2k_visual_scaling_factor * np.exp(log_up2k) / normalizer).tolist())
    if 'attn_sources' in expt_params.keys():
        attn_sources, attn_target_groups = expt_params['attn_sources'], expt_params['attn_target_groups']
        # FIX: the original nested double quotes inside a double-quoted f-string
        # (f"{"Attention source":<20}..."), a SyntaxError on Python < 3.12.
        print(f"{'Attention source':<20}{'Target group':<20}{'Binary cross-entropy'}")
        for idx in range(len(attn_sources)):
            source_idx = attn_sources[idx]
            target_group = attn_target_groups[idx]
            attn_vector = np.array(avg_attns)[:, 0, source_idx, target_group - 1]
            # since bce(y,y*) = - y*log(y) - (1-y*)log(1-y) and we have y* = 1 in our use case
            bce = - np.log(attn_vector).sum()
            print(f"{tokens_a[source_idx]:<20}{tokens_a_grouped[target_group - 1]:<20}{bce:.5f}")
    attentions = {
        'all': {
            'att': all_attns,
            'top_text': tokens_a + tokens_b,
            'bot_text': tokens_a + tokens_b
        },
        'a': {
            'att': a_attns,
            'top_text': tokens_a,
            'bot_text': tokens_a if token_groups is None else tokens_a_grouped
        },
        'b': {
            'att': b_attns,
            'top_text': tokens_b,
            'bot_text': tokens_b
        },
        'ab': {
            'att': ab_attns,
            'top_text': tokens_a,
            'bot_text': tokens_b
        },
        'ba': {
            'att': ba_attns,
            'top_text': tokens_b,
            'bot_text': tokens_a if token_groups is None else tokens_a_grouped
        },
        'avg_aa': {
            'att': avg_attns,
            'top_text': tokens_a,
            'bot_text': tokens_a if token_groups is None else tokens_a_grouped
        },
        'up2k_aa': {
            'att': up2k_attns,
            'top_text': tokens_a,
            'bot_text': tokens_a if token_groups is None else tokens_a_grouped
        },
    }
    return attentions
# coding=utf-8 # Copyright 2018 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Change log # 12/12/18 Jesse Vig Adapted to BERT model # 12/19/18 Jesse Vig Assorted cleanup. Changed orientation of attention matrices. Updated comments. """Module for postprocessing and displaying transformer attentions. This module is designed to be called from an ipython notebook. """ import json import os import numpy as np import IPython.display as display vis_html = """ <span style="user-select:none"> Layer: <select id="layer"></select> Attention: <select id="att_type"> <option value="all">All</option> <option value="a">Sentence A self-attention</option> <option value="b">Sentence B self-attention</option> <option value="ab">Sentence A -> Sentence B</option> <option value="ba">Sentence B -> Sentence A</option> <option value="avg_aa">Head-averaged sentence A self-attention</option> <option value="up2k_aa">Reduced up-to-k sentence A self-attention</option> </select> </span> <div id='vis'></div> """ __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) vis_js = open(os.path.join(__location__, 'attention.js')).read() def show(tokens_a, tokens_b, attn, expt_params): """Displays attention visualization. expt_params: a dictionary possibly containing the following keys token_groups: array of nonnegative integers indicating how tokens are to be grouped in the viz e.g. "The quick brown fox jumps over the lazy dog ." 
    with [1,1,1,1,0,0,2,2,2,0] produces the target groups "The quick brown fox"
    and "the lazy dog". ONLY WORKS WITH A->A, B->A FOR NOW.
    attn_sources: array of indices of the tokens (in tokens_a) with attention
        distributions that we are interested in.
    attn_target_groups: array of indices corresponding to token group that the
        self-attention of the corresponding source should be focusing on, for the
        purpose of computing binary cross-entropy. Only allowed values are 1 and 2.
    """
    params = expt_params.keys()
    if 'token_groups' in params:
        token_groups = expt_params['token_groups']
        # token_groups covers only the real tokens, not the [CLS]/[SEP]
        # wrappers that tokens_a carries -- hence the "- 2".
        assert(len(token_groups) == len(tokens_a) - 2)
        assert(all(type(i) is int for i in token_groups))
        if 'attn_sources' in params and 'attn_target_groups' in params:
            attn_sources = expt_params['attn_sources']
            attn_target_groups = expt_params['attn_target_groups']
            # Cross-entropy reporting requires all three groups to be present.
            assert(set(token_groups) == set([0,1,2]))
            assert(len(attn_sources) == len(attn_target_groups))
            assert(set(attn_sources).issubset(set(range(len(tokens_a)))))
            assert(set(attn_target_groups).issubset(set([1,2])))
        elif 'attn_sources' in params or 'attn_target_groups' in params:
            # The two options only make sense together.
            raise ValueError('Please provide both attn_sources and attn_target_groups, otherwise omit both of them.')
    attentions = _get_attentions(tokens_a, tokens_b, attn, expt_params)
    att_json = json.dumps(attentions)
    _show_attention(att_json)


def _show_attention(att_json):
    """Inject the attention JSON into the notebook and run the d3 visualization.

    Relies on module-level `vis_html` / `vis_js` assets (defined elsewhere in
    this file) and IPython `display`.
    """
    display.display(display.HTML(vis_html))
    display.display(display.Javascript('window.attention = %s' % att_json))
    display.display(display.Javascript(vis_js))


def logmatmulexp(A, B):
    """Compute log(exp(A) @ exp(B)) in a numerically stable way.

    Standard log-sum-exp trick applied to a matrix product: subtract the
    per-row maxima before exponentiating, then add them back in log space.
    """
    # assuming A,B have shape [1, n, n]
    max_A = np.max(A, -1, keepdims=True)
    max_B = np.max(B, -1, keepdims=True)
    C = np.matmul(np.exp(A - max_A), np.exp(B - max_B))
    np.log(C, out=C)  # in-place log to avoid an extra allocation
    C += max_A + np.transpose(max_B, (0,2,1))
    return C


def _get_attentions(tokens_a, tokens_b, attn, expt_params):
    """Compute representation of the attention to pass to the d3 visualization

    Args:
      tokens_a: tokens in sentence A
      tokens_b: tokens in sentence B
      attn: numpy array, attention
          [num_layers, batch_size, num_heads, seq_len, seq_len]
      expt_params: dictionary containing customizations for the viz, e.g.
          target groups and inputs for computing cross-entropy

    Returns:
      Dictionary of attention representations with the structure:
      {
        'all': Representations for showing all attentions at the same time.
               (source = AB, target = AB)
        'a': Sentence A self-attention (source = A, target = A)
        'b': Sentence B self-attention (source = B, target = B)
        'ab': Sentence A -> Sentence B attention (source = A, target = B)
        'ba': Sentence B -> Sentence A attention (source = B, target = A)
      }
      and each sub-dictionary has structure:
      {
        'att': list of inter attentions matrices, one for each layer. Each is
          of shape [num_heads, source_seq_len, target_seq_len]
        'top_text': list of source tokens, to be displayed on the left of the
          vis
        'bot_text': list of target tokens, to be displayed on the right of the
          vis
      }
    """
    all_attns = []
    a_attns = []
    b_attns = []
    ab_attns = []
    ba_attns = []
    slice_a = slice(0, len(tokens_a))  # Positions corresponding to sentence A in input
    slice_b = slice(len(tokens_a), len(tokens_a) + len(tokens_b))  # Position corresponding to sentence B in input
    avg_attns = []   # per-layer head-averaged A->A attention
    up2k_attns = []  # per-layer cumulative (layers 1..k) head-averaged A->A attention
    # up2k = np.expand_dims(np.identity(len(tokens_a)), 0) # initialize accumulator for reduction operation
    log_up2k = None  # log-space accumulator for the chained matmul product
    tokens_a_grouped = None
    no_sep_slice = slice(1, len(tokens_a)-1)  # for renormalization so viz is not dominated by [CLS], [SEP] attentions
    if 'token_groups' in expt_params.keys():
        token_groups = expt_params['token_groups']
        # NOTE(review): these mutate the caller's list in place -- calling
        # twice with the same expt_params would double-wrap it; confirm intended.
        token_groups.insert(0, 0)  # add 0 for [CLS]
        token_groups.append(0)  # add 0 for [SEP]
        # Map each group id to the token indices belonging to it.
        d = {i: [idx for (idx,grp) in enumerate(token_groups) if grp == i] for i in set(token_groups)}
        tokens_a_grouped = []
        for grp, idx_list in d.items():
            if grp == 0:
                continue  # group 0 = ignored tokens
            tokens_a_grouped.append(' '.join(tokens_a[idx] for idx in idx_list))
        print("Token groups:", list(enumerate(tokens_a_grouped, 1)))
    else:
        print('Number of tokens:', len(tokens_a))
        token_groups = None
    head_visual_scaling_factor = 1
    up2k_visual_scaling_factor = 1
    num_layers = len(attn)
    for layer in range(num_layers):
        layer_attn = attn[layer][0]  # Get layer attention (assume batch size = 1), shape = [num_heads, seq_len, seq_len]
        all_attns.append(layer_attn.tolist())  # Append AB->AB attention for layer, across all heads
        b_attns.append(layer_attn[:, slice_b, slice_b].tolist())  # Append B->B attention for layer, across all heads
        ab_attns.append(layer_attn[:, slice_a, slice_b].tolist())  # Append A->B attention for layer, across all heads
        aa_attn = layer_attn[:, slice_a, slice_a]  # keep only the a->a attentions
        aa_attn /= aa_attn.sum(axis=2, keepdims=True)  # renormalize axis 2 of aa_attn after slicing
        head_avg = np.mean(aa_attn, axis=0, keepdims=True)  # mean preserves normalization along axis 2
        # normalizer = head_avg[:, :, no_sep_slice].sum(axis=2, keepdims=True)
        # avg_attns.append((head_visual_scaling_factor * head_avg / normalizer).tolist())
        if log_up2k is None:
            # First layer: the cumulative product is just this layer's average.
            log_up2k = np.log(head_avg)
        else:
            log_head_avg = np.log(head_avg)
            log_up2k = logmatmulexp(log_head_avg, log_up2k)  # more numerically stable than chaining matmuls
            # np.matmul(head_avg, up2k, out=up2k)
            # up2k /= up2k.sum(axis=2, keepdims=True)
        # normalizer = np.exp(log_up2k)[:, :, no_sep_slice].sum(axis=2, keepdims=True)
        # up2k_attns.append((up2k_visual_scaling_factor * np.exp(up2k) / normalizer).tolist())
        if token_groups is not None:
            # Collapse target-token columns into one column per group.
            a_attn_grouped = None
            ba_attn_grouped = None
            avg_attn_grouped = None
            up2k_attn_grouped = None
            for grp, idx_list in d.items():
                if grp == 0:  # group 0 only consists of ignored tokens
                    continue
                if a_attn_grouped is None:  # first iter
                    a_attn_grouped = layer_attn[:, slice_a, idx_list].sum(axis=2, keepdims=True)
                    ba_attn_grouped = layer_attn[:, slice_b, idx_list].sum(axis=2, keepdims=True)
                    avg_attn_grouped = head_avg[:, slice_a, idx_list].sum(axis=2, keepdims=True)
                    up2k_attn_grouped = np.exp(log_up2k)[:, slice_a, idx_list].sum(axis=2, keepdims=True)
                else:
                    a_attn_grouped = np.append(a_attn_grouped, layer_attn[:, slice_a, idx_list].sum(axis=2, keepdims=True), axis=2)
                    ba_attn_grouped = np.append(ba_attn_grouped, layer_attn[:, slice_b, idx_list].sum(axis=2, keepdims=True), axis=2)
                    avg_attn_grouped = np.append(avg_attn_grouped, head_avg[:, slice_a, idx_list].sum(axis=2, keepdims=True), axis=2)
                    up2k_attn_grouped = np.append(up2k_attn_grouped, np.exp(log_up2k)[:, slice_a, idx_list].sum(axis=2, keepdims=True), axis=2)
            a_attns.append(a_attn_grouped.tolist())  # Append A->A attention for layer, across all heads
            ba_attns.append(ba_attn_grouped.tolist())  # Append B->A attention for layer, across all heads
            normalizer = avg_attn_grouped.sum(axis=2, keepdims=True)
            avg_attns.append((head_visual_scaling_factor * avg_attn_grouped / normalizer).tolist())
            normalizer = up2k_attn_grouped.sum(axis=2, keepdims=True)
            up2k_attns.append((up2k_visual_scaling_factor * up2k_attn_grouped / normalizer).tolist())
        else:
            a_attns.append(layer_attn[:, slice_a, slice_a].tolist())  # Append A->A attention for layer, across all heads
            ba_attns.append(layer_attn[:, slice_b, slice_a].tolist())  # Append B->A attention for layer, across all heads
            # Renormalize over non-[CLS]/[SEP] positions only (see no_sep_slice).
            normalizer = head_avg[:, :, no_sep_slice].sum(axis=2, keepdims=True)
            avg_attns.append((head_visual_scaling_factor * head_avg / normalizer).tolist())
            normalizer = np.exp(log_up2k)[:, :, no_sep_slice].sum(axis=2, keepdims=True)
            up2k_attns.append((up2k_visual_scaling_factor * np.exp(log_up2k) / normalizer).tolist())
    if 'attn_sources' in expt_params.keys():
        # Report per-source binary cross-entropy of the head-averaged
        # attention mass landing on the requested target group.
        attn_sources, attn_target_groups = expt_params['attn_sources'], expt_params['attn_target_groups']
        print(f"{'Attention source':<20}{'Target group':<20}{'Binary cross-entropy'}")
        for idx in range(len(attn_sources)):
            source_idx = attn_sources[idx]
            target_group = attn_target_groups[idx]
            # avg_attns has shape [num_layers][1][len(tokens_a)][num_groups].
            attn_vector = np.array(avg_attns)[:, 0, source_idx, target_group-1]
            # since bce(y,y*) = - y*log(y) - (1-y*)log(1-y) and we have y* = 1 in our use case
            bce = - np.log(attn_vector).sum()
            print(f"{tokens_a[source_idx]:<20}{tokens_a_grouped[target_group - 1]:<20}{bce:.5f}")
    attentions = {
        'all': {
            'att': all_attns,
            'top_text': tokens_a + tokens_b,
            'bot_text': tokens_a + tokens_b
        },
        'a': {
            'att': a_attns,
            'top_text': tokens_a,
            'bot_text': tokens_a if token_groups is None else tokens_a_grouped
        },
        'b': {
            'att': b_attns,
            'top_text': tokens_b,
            'bot_text': tokens_b
        },
        'ab': {
            'att': ab_attns,
            'top_text': tokens_a,
            'bot_text': tokens_b
        },
        'ba': {
            'att': ba_attns,
            'top_text': tokens_b,
            'bot_text': tokens_a if token_groups is None else tokens_a_grouped
        },
        'avg_aa': {
            'att': avg_attns,
            'top_text': tokens_a,
            'bot_text': tokens_a if token_groups is None else tokens_a_grouped
        },
        'up2k_aa': {
            'att': up2k_attns,
            'top_text': tokens_a,
            'bot_text': tokens_a if token_groups is None else tokens_a_grouped
        },
    }
    return attentions
"""Smoke test for the martypy API: connects to a Marty robot and exercises
movement, eyes, sounds, sensors and calibration commands while logging every
result to a timestamped file and to stdout.

Fixes in this revision:
- f-strings used double quotes inside ``{...}`` while the string itself was
  double-quoted (``f"...{get_accelerometer("x")}"``) -- a SyntaxError before
  Python 3.12; switched to single quotes inside the replacement fields.
- ``logger.info("Joint positions: ", [...])`` passed an argument without a
  %-placeholder, which makes the logging module raise a formatting error;
  now uses lazy ``%s`` formatting.
- Copy-pasted "Arms 0" labels on the ``play_sound`` calls corrected.
"""
import time
import logging
import os
import sys
import json
from martypy import Marty

# Joint names accepted by move_joint(), in index order 0..8.
jointNames = [
    'left hip', 'left twist', 'left knee',
    'right hip', 'right twist', 'right knee',
    'left arm', 'right arm', 'eyes'
]

def betweenCommands():
    # Give the robot time to finish the previous motion.
    time.sleep(3)

def testBoolCmd(cmdStr: str, cmdRslt: bool):
    # Log a command name together with its boolean result, then pause.
    logger.info(f"{cmdStr}, rslt = {cmdRslt}")
    betweenCommands()

def loggingCB(logStr: str) -> None:
    # Forward martypy's internal log lines to our logger.
    logger.debug(logStr)

# Check the log folder exists
logsFolder = "logs"
if not os.path.exists(logsFolder):
    os.mkdir(logsFolder)

# Log file name
logFileName = "MartyPyLogTest_" + time.strftime("%Y%m%d-%H%M%S") + ".log"
logFileName = os.path.join(logsFolder, logFileName)
print("Logging to file " + logFileName)

# Setup logging: everything goes to the file, DEBUG and up also to stdout.
logging.basicConfig(filename=logFileName,
                    format='%(levelname)s: %(asctime)s %(funcName)s(%(lineno)d) -- %(message)s',
                    level=logging.DEBUG)
logger = logging.getLogger("MartyPyTest")
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)

mymarty = None
try:
    # mymarty = Marty('wifi', '192.168.86.11')
    # mymarty = Marty('wifi', '192.168.86.11', subscribeRateHz=0)
    mymarty = Marty('socket://192.168.86.41')
    # mymarty = Marty("usb", "COM9", debug=True)
    # mymarty = Marty('usb:///dev/tty.SLAB_USBtoUART', debug=True)
except Exception as excp:
    logger.debug(f"Couldn't connect to marty {excp}")
    exit()

mymarty.register_logging_callback(loggingCB)

martySysInfo = mymarty.get_system_info()
martyVersion2 = martySysInfo.get("HardwareVersion", "1.0") == "2.0"

if martyVersion2:
    logger.info(f"Marty has {len(mymarty.get_hw_elems_list())} hardware parts")

if martyVersion2:
    logger.info(f"Calibration flag {mymarty.is_calibrated()}")
testBoolCmd("Calibration flag clear", mymarty.clear_calibration())
if martyVersion2:
    logger.info(f"Calibration flag should be False ... {mymarty.is_calibrated()}")
    assert not mymarty.is_calibrated()
testBoolCmd("Calibration flag set", mymarty.save_calibration())
if martyVersion2:
    logger.info(f"Calibration flag should be True ... {mymarty.is_calibrated()}")
time.sleep(0.1)
if martyVersion2:
    assert mymarty.is_calibrated()

logger.info(f"Marty interface stats {json.dumps(mymarty.get_interface_stats())}")

testBoolCmd("Get ready", mymarty.get_ready())
testBoolCmd("Circle Dance", mymarty.circle_dance())
testBoolCmd("Eyes excited", mymarty.eyes('excited'))
testBoolCmd("Eyes wide", mymarty.eyes('wide'))
testBoolCmd("Eyes angry", mymarty.eyes('angry'))
testBoolCmd("Eyes normal", mymarty.eyes('normal'))
testBoolCmd("Kick left", mymarty.kick('left'))
testBoolCmd("Kick right", mymarty.kick('right'))
testBoolCmd("Stop", mymarty.stop())
testBoolCmd("Arms 45", mymarty.arms(45, 45, 500))
testBoolCmd("Arms 0", mymarty.arms(0, 0, 500))
testBoolCmd("Play sound disbelief", mymarty.play_sound("disbelief"))
testBoolCmd("Play sound excited", mymarty.play_sound("excited"))
testBoolCmd("Play sound screenfree", mymarty.play_sound("screenfree"))

logger.info(f"Marty interface stats {json.dumps(mymarty.get_interface_stats())}")

# Exercise move_joint both by numeric index and by name.
for i in range(9):
    testBoolCmd(f"Move joint {i}", mymarty.move_joint(i, i * 10, 500))
for jointName in jointNames:
    testBoolCmd(f"Move joint {jointName}", mymarty.move_joint(jointName, 123, 500))

logger.info(f"Accelerometer x {mymarty.get_accelerometer('x')}")
logger.info(f"Accelerometer y {mymarty.get_accelerometer('y')}")
logger.info(f"Accelerometer z {mymarty.get_accelerometer('z')}")

if martyVersion2:
    testBoolCmd("Dance", mymarty.dance())
    testBoolCmd("Eyes wiggle", mymarty.eyes('wiggle'))
    testBoolCmd("Hold position", mymarty.hold_position(6000))
    testBoolCmd("is_moving", mymarty.is_moving())

# Lazy %-formatting so the list is only rendered if the record is emitted.
logger.info("Joint positions: %s", [mymarty.get_joint_position(pos) for pos in range(9)])

time.sleep(5)
mymarty.close()
"""Smoke test for the martypy API (second copy): connects to a Marty robot and
exercises movement, eyes, sounds, sensors and calibration commands, logging
every result to a timestamped file and to stdout.

Fixes in this revision:
- ``logger.info("Joint positions: ", [...])`` passed an argument without a
  %-placeholder, which makes the logging module raise a formatting error;
  now uses lazy ``%s`` formatting.
- Copy-pasted "Arms 0" labels on the ``play_sound`` calls corrected.
"""
import time
import logging
import os
import sys
import json
from martypy import Marty

# Joint names accepted by move_joint(), in index order 0..8.
jointNames = [
    'left hip', 'left twist', 'left knee',
    'right hip', 'right twist', 'right knee',
    'left arm', 'right arm', 'eyes'
]

def betweenCommands():
    # Give the robot time to finish the previous motion.
    time.sleep(3)

def testBoolCmd(cmdStr: str, cmdRslt: bool):
    # Log a command name together with its boolean result, then pause.
    logger.info(f"{cmdStr}, rslt = {cmdRslt}")
    betweenCommands()

def loggingCB(logStr: str) -> None:
    # Forward martypy's internal log lines to our logger.
    logger.debug(logStr)

# Check the log folder exists
logsFolder = "logs"
if not os.path.exists(logsFolder):
    os.mkdir(logsFolder)

# Log file name
logFileName = "MartyPyLogTest_" + time.strftime("%Y%m%d-%H%M%S") + ".log"
logFileName = os.path.join(logsFolder, logFileName)
print("Logging to file " + logFileName)

# Setup logging: everything goes to the file, DEBUG and up also to stdout.
logging.basicConfig(filename=logFileName,
                    format='%(levelname)s: %(asctime)s %(funcName)s(%(lineno)d) -- %(message)s',
                    level=logging.DEBUG)
logger = logging.getLogger("MartyPyTest")
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)

mymarty = None
try:
    # mymarty = Marty('wifi', '192.168.86.11')
    # mymarty = Marty('wifi', '192.168.86.11', subscribeRateHz=0)
    mymarty = Marty('socket://192.168.86.41')
    # mymarty = Marty("usb", "COM9", debug=True)
    # mymarty = Marty('usb:///dev/tty.SLAB_USBtoUART', debug=True)
except Exception as excp:
    logger.debug(f"Couldn't connect to marty {excp}")
    exit()

mymarty.register_logging_callback(loggingCB)

martySysInfo = mymarty.get_system_info()
martyVersion2 = martySysInfo.get("HardwareVersion", "1.0") == "2.0"

if martyVersion2:
    logger.info(f"Marty has {len(mymarty.get_hw_elems_list())} hardware parts")

if martyVersion2:
    logger.info(f"Calibration flag {mymarty.is_calibrated()}")
testBoolCmd("Calibration flag clear", mymarty.clear_calibration())
if martyVersion2:
    logger.info(f"Calibration flag should be False ... {mymarty.is_calibrated()}")
    assert not mymarty.is_calibrated()
testBoolCmd("Calibration flag set", mymarty.save_calibration())
if martyVersion2:
    logger.info(f"Calibration flag should be True ... {mymarty.is_calibrated()}")
time.sleep(0.1)
if martyVersion2:
    assert mymarty.is_calibrated()

logger.info(f"Marty interface stats {json.dumps(mymarty.get_interface_stats())}")

testBoolCmd("Get ready", mymarty.get_ready())
testBoolCmd("Circle Dance", mymarty.circle_dance())
testBoolCmd("Eyes excited", mymarty.eyes('excited'))
testBoolCmd("Eyes wide", mymarty.eyes('wide'))
testBoolCmd("Eyes angry", mymarty.eyes('angry'))
testBoolCmd("Eyes normal", mymarty.eyes('normal'))
testBoolCmd("Kick left", mymarty.kick('left'))
testBoolCmd("Kick right", mymarty.kick('right'))
testBoolCmd("Stop", mymarty.stop())
testBoolCmd("Arms 45", mymarty.arms(45, 45, 500))
testBoolCmd("Arms 0", mymarty.arms(0, 0, 500))
testBoolCmd("Play sound disbelief", mymarty.play_sound("disbelief"))
testBoolCmd("Play sound excited", mymarty.play_sound("excited"))
testBoolCmd("Play sound screenfree", mymarty.play_sound("screenfree"))

logger.info(f"Marty interface stats {json.dumps(mymarty.get_interface_stats())}")

# Exercise move_joint both by numeric index and by name.
for i in range(9):
    testBoolCmd(f"Move joint {i}", mymarty.move_joint(i, i * 10, 500))
for jointName in jointNames:
    testBoolCmd(f"Move joint {jointName}", mymarty.move_joint(jointName, 123, 500))

logger.info(f"Accelerometer x {mymarty.get_accelerometer('x')}")
logger.info(f"Accelerometer y {mymarty.get_accelerometer('y')}")
logger.info(f"Accelerometer z {mymarty.get_accelerometer('z')}")

if martyVersion2:
    testBoolCmd("Dance", mymarty.dance())
    testBoolCmd("Eyes wiggle", mymarty.eyes('wiggle'))
    testBoolCmd("Hold position", mymarty.hold_position(6000))
    testBoolCmd("is_moving", mymarty.is_moving())

# Lazy %-formatting so the list is only rendered if the record is emitted.
logger.info("Joint positions: %s", [mymarty.get_joint_position(pos) for pos in range(9)])

time.sleep(5)
mymarty.close()
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from __future__ import print_function from .sql_updates import check_and_insert_user_agent from .sql_updates import get_username_random, get_username_to_unfollow_random from .sql_updates import ( get_usernames_first, get_usernames, get_username_row_count, check_if_userid_exists, get_medias_to_unlike, update_media_complete, ) from .sql_updates import insert_media, insert_username, insert_unfollow_count from .sql_updates import check_already_followed, check_already_unfollowed from .sql_updates import check_and_update, check_already_liked import re import time import sqlite3 import signal import random import logging import json import itertools import datetime import atexit import importlib import os import sys import pickle python_version_test = f"If you are reading this error, you are not running Python 3.6 or greater. Check 'python --version' or 'python3 --version'." # Required Dependencies and Modules, offer to install them automatically # Keep fake_useragent last, quirk for pythonanywhere required_modules = ["requests", "instaloader", "threading", "fake_useragent"] for modname in required_modules: try: # try to import the module normally and put it in globals globals()[modname] = importlib.import_module(modname) except ImportError as e: if modname is not "fake_useragent": print( f"Failed to load module {modname}. Make sure you have installed correctly dependencies in requirements.txt." 
) quit() class InstaBot: """ Instabot.py version 1.2.2 """ database_name = None session_file = None follows_db = None follows_db_c = None url = "https://www.instagram.com/" url_tag = "https://www.instagram.com/explore/tags/%s/?__a=1" url_location = "https://www.instagram.com/explore/locations/%s/?__a=1" url_likes = "https://www.instagram.com/web/likes/%s/like/" url_unlike = "https://www.instagram.com/web/likes/%s/unlike/" url_comment = "https://www.instagram.com/web/comments/%s/add/" url_follow = "https://www.instagram.com/web/friendships/%s/follow/" url_unfollow = "https://www.instagram.com/web/friendships/%s/unfollow/" url_login = "https://www.instagram.com/accounts/login/ajax/" url_logout = "https://www.instagram.com/accounts/logout/" url_media_detail = "https://www.instagram.com/p/%s/?__a=1" url_media = "https://www.instagram.com/p/%s/" url_user_detail = "https://www.instagram.com/%s/" api_user_detail = "https://i.instagram.com/api/v1/users/%s/info/" instabot_repo_update = ( "https://github.com/instabot-py/instabot.py/raw/master/version.txt" ) user_agent = "" "" accept_language = "en-US,en;q=0.5" # If instagram ban you - query return 400 error. error_400 = 0 # If you have 3 400 error in row - looks like you banned. error_400_to_ban = 3 # If InstaBot think you are banned - going to sleep. ban_sleep_time = 3 * 60 * 60 # All counter. bot_mode = 0 like_counter = 0 follow_counter = 0 unfollow_counter = 0 comments_counter = 0 current_user = "hajka" current_index = 0 current_id = "abcds" # List of user_id, that bot follow bot_follow_list = [] user_info_list = [] user_list = [] ex_user_list = [] unwanted_username_list = [] is_checked = False is_selebgram = False is_fake_account = False is_active_user = False is_following = False is_follower = False is_rejected = False is_self_checking = False is_by_tag = False is_follower_number = 0 self_following = 0 self_follower = 0 # Log setting. 
    logging.basicConfig(filename="errors.log", level=logging.INFO)
    log_file_path = ""
    log_file = 0
    # Other.
    user_id = 0
    media_by_tag = 0
    media_on_feed = []
    media_by_user = []
    login_status = False
    by_location = False
    # Running Times
    start_at_h = 0
    start_at_m = 0
    end_at_h = 23
    end_at_m = 59
    # For new_auto_mod
    next_iteration = {
        "Like": 0,
        "Unlike": 0,
        "Follow": 0,
        "Unfollow": 0,
        "Comments": 0,
        "Populate": 0,
    }
    prog_run = True

    def __init__(
        self,
        login,
        password,
        like_per_day=1000,
        unlike_per_day=0,
        media_max_like=150,
        media_min_like=0,
        user_max_follow=0,
        user_min_follow=0,
        follow_per_day=0,
        time_till_unlike=3 * 24 * 60 * 60,  # Cannot be zero
        follow_time=5 * 60 * 60,  # Cannot be zero
        follow_time_enabled=True,
        unfollow_per_day=0,
        unfollow_recent_feed=True,
        start_at_h=0,
        start_at_m=0,
        end_at_h=23,
        end_at_m=59,
        database_name=None,
        session_file=None,  # False = disabled, None = Will use default username.session notation, string = will use that as filename
        # NOTE(review): the list/dict defaults below are mutable default
        # arguments shared across instances -- confirm that is acceptable here.
        comment_list=[
            ["this", "the", "your"],
            ["photo", "picture", "pic", "shot", "snapshot"],
            ["is", "looks", "feels", "is really"],
            [
                "great",
                "super",
                "good",
                "very good",
                "good",
                "wow",
                "WOW",
                "cool",
                "GREAT",
                "magnificent",
                "magical",
                "very cool",
                "stylish",
                "beautiful",
                "so beautiful",
                "so stylish",
                "so professional",
                "lovely",
                "so lovely",
                "very lovely",
                "glorious",
                "so glorious",
                "very glorious",
                "adorable",
                "excellent",
                "amazing",
            ],
            [".", "..", "...", "!", "!!", "!!!"],
        ],
        comments_per_day=0,
        tag_list=["cat", "car", "dog"],
        max_like_for_one_tag=5,
        unfollow_break_min=15,
        unfollow_break_max=30,
        log_mod=0,
        proxy="",
        user_blacklist={},
        tag_blacklist=[],
        unwanted_username_list=[],
        unfollow_whitelist=[],
    ):
        """Configure the bot, open the sqlite database, pick a user agent,
        then log in and install cleanup handlers.

        Side effects: network requests (update check, Instagram login),
        sqlite connection, and signal/atexit registration.
        """
        # session_file: string = explicit path, None = derive from login,
        # False = session persistence disabled.
        if session_file is not None and session_file is not False:
            self.session_file = session_file
        elif session_file is False:
            self.session_file = None
        else:
            self.session_file = f"{login.lower()}.session"
        if database_name is not None:
            self.database_name = database_name
        else:
            self.database_name = f"{login.lower()}.db"
        self.follows_db = sqlite3.connect(
            self.database_name, timeout=0, isolation_level=None
        )
        self.follows_db_c = self.follows_db.cursor()
        # Migrate/validate the DB schema (helper from .sql_updates).
        check_and_update(self)
        # Fallback pool in case fake_useragent cannot fetch its data.
        list_of_ua = [
            "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FSL 7.0.6.01001)",
            "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FSL 7.0.7.01001)",
            "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FSL 7.0.5.01003)",
            "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0",
            "Mozilla/5.0 (X11; U; Linux x86_64; de; rv:1.9.2.8) Gecko/20100723 Ubuntu/10.04 (lucid) Firefox/3.6.8",
            "Mozilla/5.0 (Windows NT 5.1; rv:13.0) Gecko/20100101 Firefox/13.0.1",
            "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:11.0) Gecko/20100101 Firefox/11.0",
            "Mozilla/5.0 (X11; U; Linux x86_64; de; rv:1.9.2.8) Gecko/20100723 Ubuntu/10.04 (lucid) Firefox/3.6.8",
            "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; .NET CLR 1.0.3705)",
            "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)",
            "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)",
            "Opera/9.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.01",
            "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727)",
            "Mozilla/5.0 (Windows NT 5.1; rv:5.0.1) Gecko/20100101 Firefox/5.0.1",
            "Mozilla/5.0 (Windows NT 6.1; rv:5.0) Gecko/20100101 Firefox/5.02",
            "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.112 Safari/535.1",
            "Mozilla/4.0 (compatible; MSIE 6.0; MSIE 5.5; Windows NT 5.0) Opera 7.02 Bork-edition [en]",
        ]
        # NOTE(review): bare except hides the actual failure mode of
        # fake_useragent -- consider narrowing.
        try:
            fallback = random.sample(list_of_ua, 1)
            fake_ua = fake_useragent.UserAgent(fallback=fallback[0])
            self.user_agent = check_and_insert_user_agent(self, str(fake_ua))
        except:
            fake_ua = random.sample(list_of_ua, 1)
            self.user_agent = check_and_insert_user_agent(self, str(fake_ua[0]))
        self.bot_start = datetime.datetime.now()
        self.bot_start_ts = time.time()
        self.start_at_h = start_at_h
        self.start_at_m = start_at_m
        self.end_at_h = end_at_h
        self.end_at_m = end_at_m
        self.unfollow_break_min = unfollow_break_min
        self.unfollow_break_max = unfollow_break_max
        self.user_blacklist = user_blacklist
        self.tag_blacklist = tag_blacklist
        self.unfollow_whitelist = unfollow_whitelist
        self.comment_list = comment_list
        self.instaloader = instaloader.Instaloader()
        self.unfollow_recent_feed = unfollow_recent_feed

        self.time_in_day = 24 * 60 * 60
        # Per-action delays are derived so the daily quota is spread evenly.
        # Like
        self.like_per_day = like_per_day
        if self.like_per_day != 0:
            self.like_delay = self.time_in_day / self.like_per_day

        # Unlike
        self.time_till_unlike = time_till_unlike
        self.unlike_per_day = unlike_per_day
        if self.unlike_per_day != 0:
            # NOTE(review): this overwrites unlike_per_day with a delay
            # (unlike the other branches, which use a *_delay name) -- confirm.
            self.unlike_per_day = self.time_in_day / self.unlike_per_day

        # Follow
        self.follow_time = follow_time  # Cannot be zero
        self.follow_time_enabled = follow_time_enabled
        self.follow_per_day = follow_per_day
        if self.follow_per_day != 0:
            self.follow_delay = self.time_in_day / self.follow_per_day

        # Unfollow
        self.unfollow_per_day = unfollow_per_day
        if self.unfollow_per_day != 0:
            self.unfollow_delay = self.time_in_day / self.unfollow_per_day

        # Comment
        self.comments_per_day = comments_per_day
        if self.comments_per_day != 0:
            self.comments_delay = self.time_in_day / self.comments_per_day

        # Don't like if media have more than n likes.
        self.media_max_like = media_max_like
        # Don't like if media have less than n likes.
        self.media_min_like = media_min_like
        # Don't follow if user have more than n followers.
        self.user_max_follow = user_max_follow
        # Don't follow if user have less than n followers.
        self.user_min_follow = user_min_follow
        # Auto mod seting:
        # Default list of tag.
        self.tag_list = tag_list
        # Get random tag, from tag_list, and like (1 to n) times.
        self.max_like_for_one_tag = max_like_for_one_tag
        # log_mod 0 to console, 1 to file
        self.log_mod = log_mod
        # s: authenticated Instagram session; c: plain session (update check).
        self.s = requests.Session()
        self.c = requests.Session()
        # if you need proxy make something like this:
        # self.s.proxies = {"https" : "http://proxyip:proxyport"}
        # by @ageorgios
        if proxy != "":
            proxies = {"http": f"http://{proxy}", "https": f"http://{proxy}"}
            self.s.proxies.update(proxies)
            self.c.proxies.update(proxies)
        # convert login to lower
        self.user_login = login.lower()
        self.user_password = password
        self.bot_mode = 0
        self.media_by_tag = []
        self.media_on_feed = []
        self.media_by_user = []
        self.current_user_info = ""
        self.unwanted_username_list = unwanted_username_list
        now_time = datetime.datetime.now()
        self.check_for_bot_update()
        log_string = "Instabot v1.2.2/0 started at %s:" % (
            now_time.strftime("%d.%m.%Y %H:%M")
        )
        self.write_log(log_string)
        self.login()
        self.populate_user_blacklist()
        # Make sure followed accounts are unfollowed on exit/interrupt.
        signal.signal(signal.SIGINT, self.cleanup)
        signal.signal(signal.SIGTERM, self.cleanup)
        atexit.register(self.cleanup)
        self.instaload = instaloader.Instaloader()

    def check_for_bot_update(self):
        """Compare the local version.txt against the GitHub repo's copy and
        log whether an update is available. Best-effort: all failures are
        swallowed and only logged."""
        self.write_log("Checking for updates...")
        try:
            # CHANGE THIS TO OFFICIAL REPO IF KEPT
            updated_timestamp = self.c.get(self.instabot_repo_update)
            # NOTE(review): file handle is never closed -- leak per call.
            current_version_timestamp = open("version.txt", "r")
            if int(updated_timestamp.text) > int(current_version_timestamp.read()):
                self.write_log(
                    ">>> UPDATE AVAILABLE <<< Please update Instabot. You are running an older version."
                )
            else:
                self.write_log("You are running the latest stable version")
        except:
            self.write_log("Could not check for updates")

    def get_user_id_by_username(self, user_name):
        """Scrape a profile page and return the numeric user id for
        *user_name* (raises AttributeError if _sharedData is missing)."""
        url_info = self.url_user_detail % (user_name)
        info = self.s.get(url_info)
        json_info = json.loads(
            re.search(
                "window._sharedData = (.*?);</script>", info.text, re.DOTALL
            ).group(1)
        )
        id_user = json_info["entry_data"]["ProfilePage"][0]["graphql"]["user"]["id"]
        return id_user

    def populate_user_blacklist(self):
        """Resolve each blacklisted username to its numeric id, mutating
        self.user_blacklist in place (username -> user_id)."""
        for user in self.user_blacklist:
            user_id_url = self.url_user_detail % (user)
            info = self.s.get(user_id_url)

            # prevent error if 'Account of user was deleted or link is invalid
            from json import JSONDecodeError

            try:
                all_data = json.loads(
                    re.search(
                        "window._sharedData = (.*?);</script>", info.text, re.DOTALL
                    ).group(1)
                )
            except JSONDecodeError as e:
                self.write_log(
                    f"Account of user {user} was deleted or link is " "invalid"
                )
            else:
                # prevent exception if user have no media
                id_user = all_data["entry_data"]["ProfilePage"][0]["graphql"]["user"][
                    "id"
                ]
                # Update the user_name with the user_id
                self.user_blacklist[user] = id_user
                self.write_log(f"Blacklisted user {user} added with ID: {id_user}")
                # Jitter between profile fetches to look less bot-like.
                time.sleep(5 * random.random())

    def login(self):
        """Authenticate against Instagram.

        Order of attempts: reuse a pickled cookie session file if present;
        otherwise POST credentials, handling the 'checkpoint_required'
        challenge flow interactively (asks for the emailed/SMSed code on
        stdin). On success sets self.login_status / self.user_id and saves
        cookies back to the session file.
        """
        successfulLogin = False
        self.s.headers.update(
            {
                "Accept": "*/*",
                "Accept-Language": self.accept_language,
                "Accept-Encoding": "gzip, deflate, br",
                "Connection": "keep-alive",
                "Host": "www.instagram.com",
                "Origin": "https://www.instagram.com",
                "Referer": "https://www.instagram.com/",
                "User-Agent": self.user_agent,
                "X-Instagram-AJAX": "1",
                "Content-Type": "application/x-www-form-urlencoded",
                "X-Requested-With": "XMLHttpRequest",
            }
        )
        if self.session_file is not None and os.path.isfile(self.session_file):
            # Cookie reuse path: skip the credential POST entirely.
            self.write_log(f"Found session file {self.session_file}")
            successfulLogin = True
            with open(self.session_file, "rb") as i:
                cookies = pickle.load(i)
                self.s.cookies.update(cookies)
        else:
            self.write_log("Trying to login as {}...".format(self.user_login))
            self.login_post = {
                "username": self.user_login,
                "password": self.user_password,
            }
            r = self.s.get(self.url)
            # NOTE(review): non-raw string with \w -- use r'...' to avoid
            # invalid-escape warnings (same for the other regexes below).
            csrf_token = re.search('(?<="csrf_token":")\w+', r.text).group(0)
            self.s.headers.update({"X-CSRFToken": csrf_token})
            time.sleep(5 * random.random())
            login = self.s.post(
                self.url_login, data=self.login_post, allow_redirects=True
            )
            if (
                login.status_code != 200
                and login.status_code != 400
            ):  # Handling Other Status Codes and making debug easier!!
                self.write_log("Request didn't return 200 as status code!")
                self.write_log("Here is more info for debbugin or creating an issue")
                print("=" * 15)
                print("Response Status: ", login.status_code)
                print("=" * 15)
                print("Response Content:\n", login.text)
                print("=" * 15)
                print("Response Header:\n", login.headers)
                print("=" * 15)
                return
            loginResponse = login.json()
            try:
                self.csrftoken = login.cookies["csrftoken"]
                self.s.headers.update({"X-CSRFToken": login.cookies["csrftoken"]})
            except Exception as e:
                self.write_log("Something wrong with login")
                self.write_log(login.text)
            if loginResponse.get("errors"):
                self.write_log(
                    "Something is wrong with Instagram! Please try again later..."
                )
                for error in loginResponse["errors"]["error"]:
                    self.write_log(f"Error =>{error}")
                return
            if loginResponse.get("message") == "checkpoint_required":
                try:
                    if "instagram.com" in loginResponse["checkpoint_url"]:
                        challenge_url = loginResponse["checkpoint_url"]
                    else:
                        # NOTE(review): double quotes re-used inside a
                        # double-quoted f-string -- SyntaxError on Python
                        # < 3.12 (PEP 701); should use single quotes.
                        challenge_url = (
                            f"https://instagram.com{loginResponse["checkpoint_url"]}"
                        )
                    self.write_log(f"Challenge required at {challenge_url}")
                    with self.s as clg:
                        clg.headers.update(
                            {
                                "Accept": "*/*",
                                "Accept-Language": self.accept_language,
                                "Accept-Encoding": "gzip, deflate, br",
                                "Connection": "keep-alive",
                                "Host": "www.instagram.com",
                                "Origin": "https://www.instagram.com",
                                "User-Agent": self.user_agent,
                                "X-Instagram-AJAX": "1",
                                "Content-Type": "application/x-www-form-urlencoded",
                                "x-requested-with": "XMLHttpRequest",
                            }
                        )
                        # Get challenge page
                        challenge_request_explore = clg.get(challenge_url)
                        # Get CSRF Token from challenge page
                        challenge_csrf_token = re.search(
                            '(?<="csrf_token":")\w+', challenge_request_explore.text
                        ).group(0)
                        # Get Rollout Hash from challenge page
                        rollout_hash = re.search(
                            '(?<="rollout_hash":")\w+', challenge_request_explore.text
                        ).group(0)
                        # Ask for option 1 from challenge, which is usually Email or Phone
                        challenge_post = {"choice": 1}
                        # Update headers for challenge submit page
                        clg.headers.update({"X-CSRFToken": challenge_csrf_token})
                        clg.headers.update({"Referer": challenge_url})
                        # Request instagram to send a code
                        challenge_request_code = clg.post(
                            challenge_url, data=challenge_post, allow_redirects=True
                        )
                        # User should receive a code soon, ask for it
                        challenge_userinput_code = input(
                            "Challenge Required.\n\nEnter the code sent to your mail/phone: "
                        )
                        challenge_security_post = {
                            "security_code": int(challenge_userinput_code)
                        }
                        complete_challenge = clg.post(
                            challenge_url,
                            data=challenge_security_post,
                            allow_redirects=True,
                        )
                        if complete_challenge.status_code != 200:
                            self.write_log("Entered code is wrong, Try again later!")
                            return
                        self.csrftoken = complete_challenge.cookies["csrftoken"]
                        self.s.headers.update(
                            {"X-CSRFToken": self.csrftoken, "X-Instagram-AJAX": "1"}
                        )
                    successfulLogin = complete_challenge.status_code == 200
                except Exception as err:
                    print(f"Login failed, response: \n\n{login.text} {err}")
                    quit()
            elif loginResponse.get("authenticated") is False:
                self.write_log("Login error! Check your login data!")
                return
            else:
                rollout_hash = re.search('(?<="rollout_hash":")\w+', r.text).group(0)
                self.s.headers.update({"X-Instagram-AJAX": rollout_hash})
                successfulLogin = True
            # ig_vw=1536; ig_pr=1.25; ig_vh=772; ig_or=landscape-primary;
            self.s.cookies["csrftoken"] = self.csrftoken
            self.s.cookies["ig_vw"] = "1536"
            self.s.cookies["ig_pr"] = "1.25"
            self.s.cookies["ig_vh"] = "772"
            self.s.cookies["ig_or"] = "landscape-primary"
            time.sleep(5 * random.random())
        if successfulLogin:
            # Verify the session actually works by loading the home page and
            # looking for our own username in it.
            r = self.s.get("https://www.instagram.com/")
            self.csrftoken = re.search('(?<="csrf_token":")\w+', r.text).group(0)
            self.s.cookies["csrftoken"] = self.csrftoken
            self.s.headers.update({"X-CSRFToken": self.csrftoken})
            finder = r.text.find(self.user_login)
            if finder != -1:
                self.user_id = self.get_user_id_by_username(self.user_login)
                self.login_status = True
                self.write_log(f"{self.user_login} login success!\n")
                if self.session_file is not None:
                    self.write_log(
                        f"Saving cookies to session file {self.session_file}"
                    )
                    with open(self.session_file, "wb") as output:
                        pickle.dump(self.s.cookies, output, pickle.HIGHEST_PROTOCOL)
            else:
                self.login_status = False
                self.write_log("Login error! Check your login data!")
                # A stale session file would keep failing; remove it.
                if self.session_file is not None and os.path.isfile(self.session_file):
                    try:
                        os.remove(self.session_file)
                    except:
                        self.write_log(
                            "Could not delete session file. Please delete manually"
                        )
                self.prog_run = False
        else:
            self.write_log("Login error! Connection error!")

    def logout(self):
        """Log the session out of Instagram and report run counters."""
        now_time = datetime.datetime.now()
        log_string = (
            "Logout: likes - %i, follow - %i, unfollow - %i, comments - %i."
            % (
                self.like_counter,
                self.follow_counter,
                self.unfollow_counter,
                self.comments_counter,
            )
        )
        self.write_log(log_string)
        work_time = datetime.datetime.now() - self.bot_start
        self.write_log(f"Bot work time: {work_time}")
        try:
            logout_post = {"csrfmiddlewaretoken": self.csrftoken}
            logout = self.s.post(self.url_logout, data=logout_post)
            self.write_log("Logout success!")
            self.login_status = False
        except:
            logging.exception("Logout error!")

    def cleanup(self, *_):
        """Signal/atexit handler: unfollow everyone this run followed (with a
        randomized pause between unfollows), then log out if we own the
        session, and stop the main loop."""
        # Unfollow all bot follow
        if self.follow_counter >= self.unfollow_counter:
            for i in range(len(self.bot_follow_list)):
                f = self.bot_follow_list[0]
                if check_already_unfollowed(self, f[0]):
                    log_string = "Already unfollowed before, skipping: %s" % (f[0])
                    self.write_log(log_string)
                else:
                    log_string = "Trying to unfollow: %s" % (f[0])
                    self.write_log(log_string)
                    self.unfollow_on_cleanup(f[0])
                    sleeptime = random.randint(
                        self.unfollow_break_min, self.unfollow_break_max
                    )
                    log_string = "Pausing for %i seconds... %i of %i" % (
                        sleeptime,
                        self.unfollow_counter,
                        self.follow_counter,
                    )
                    self.write_log(log_string)
                    time.sleep(sleeptime)
                self.bot_follow_list.remove(f)
        # Logout
        if self.login_status and self.session_file is None:
            self.logout()
        self.prog_run = False

    def get_media_id_by_tag(self, tag):
        """ Get media ID set, by your hashtag or location """
        # Tags prefixed "l:" are treated as location ids instead of hashtags.
        # Results land in self.media_by_tag (emptied on failure).
        if self.login_status:
            if tag.startswith("l:"):
                tag = tag.replace("l:", "")
                self.by_location = True
                self.write_log(f"Get Media by location: {tag}")
                if self.login_status == 1:
                    url_location = self.url_location % (tag)
                    try:
                        r = self.s.get(url_location)
                        all_data = json.loads(r.text)
                        self.media_by_tag = list(
                            all_data["graphql"]["location"]["edge_location_to_media"][
                                "edges"
                            ]
                        )
                    except:
                        self.media_by_tag = []
                        self.write_log("Except on get_media!")
                        logging.exception("get_media_id_by_tag")
                else:
                    return 0
            else:
                self.by_location = False
                self.write_log(f"Get Media by tag: {tag}")
                if self.login_status == 1:
                    url_tag = self.url_tag % (tag)
                    try:
                        r = self.s.get(url_tag)
                        all_data = json.loads(r.text)
                        self.media_by_tag = list(
                            all_data["graphql"]["hashtag"]["edge_hashtag_to_media"][
                                "edges"
                            ]
                        )
                    except:
                        self.media_by_tag = []
                        self.write_log("Except on get_media!")
                        logging.exception("get_media_id_by_tag")
                else:
                    return 0

    def get_instagram_url_from_media_id(self, media_id, url_flag=True, only_code=None):
        """ Get Media Code or Full Url from Media ID Thanks to Nikished """
        # Instagram shortcodes are the media id rendered in a base-64
        # alphabet (A-Z a-z 0-9 - _), most-significant digit first.
        media_id = int(media_id)
        if url_flag is False:
            return ""
        else:
            alphabet = (
                "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"
            )
            shortened_id = ""
            while media_id > 0:
                media_id, idx = divmod(media_id, 64)
                shortened_id = alphabet[idx] + shortened_id
            if only_code:
                return shortened_id
            else:
                return f"instagram.com/p/{shortened_id}/"

    def get_username_by_media_id(self, media_id):
        """ Get username by media ID Thanks to Nikished """
        # Returns the owner's username, False on scrape failure, or "" when
        # not fully logged in.
        if self.login_status:
            if self.login_status == 1:
                media_id_url = self.get_instagram_url_from_media_id(
                    int(media_id), only_code=True
                )
                url_media = self.url_media_detail % (media_id_url)
                try:
                    r = self.s.get(url_media)
                    all_data = json.loads(r.text)
                    username = str(
                        all_data["graphql"]["shortcode_media"]["owner"]["username"]
                    )
                    self.write_log(
                        "media_id="
                        + media_id
                        + ", media_id_url="
                        + media_id_url
                        + ", username_by_media_id="
                        + username
                    )
                    return username
                except:
                    logging.exception("username_by_mediaid exception")
                    return False
            else:
                return ""

    def get_username_by_user_id(self, user_id):
        """Resolve a numeric user id to a username via instaloader; returns
        False when logged out or on any lookup failure."""
        if self.login_status:
            try:
                profile = instaloader.Profile.from_id(self.instaload.context, user_id)
                username = profile.username
                return username
            except:
                logging.exception("Except on get_username_by_user_id")
                return False
        else:
            return False

    def get_userinfo_by_name(self, username):
        """ Get user info by name """
        # Returns the scraped user-info dict, None for suspected
        # celebrity/business/fake accounts we already follow, or False on
        # failure / when logged out.
        if self.login_status:
            if self.login_status == 1:
                url_info = self.url_user_detail % (username)
                try:
                    r = self.s.get(url_info)
                    all_data = json.loads(r.text)
                    user_info = all_data["user"]
                    follows = user_info["follows"]["count"]
                    follower = user_info["followed_by"]["count"]
                    follow_viewer = user_info["follows_viewer"]
                    if follower > 3000 or follows > 1500:
                        self.write_log(
                            " >>>This is probably Selebgram, Business or Fake account"
                        )
                        if follow_viewer:
                            return None
                    return user_info
                except:
                    logging.exception("Except on get_userinfo_by_name")
                    return False
            else:
                return False

    def like_all_exist_media(self, media_size=-1, delay=True):
        """ Like all media ID that have self.media_by_tag """
        # Walks self.media_by_tag, skipping own media, blacklisted owners,
        # already-liked media, and media outside the configured like-count
        # window. (Method continues beyond this chunk.)
        if self.login_status:
            if self.media_by_tag != 0:
                i = 0
                for d in self.media_by_tag:
                    # Media count by this tag.
                    if media_size > 0 or media_size < 0:
                        media_size -= 1
                        l_c = self.media_by_tag[i]["node"]["edge_liked_by"]["count"]
                        if (
                            (l_c <= self.media_max_like and l_c >= self.media_min_like)
                            or (self.media_max_like == 0 and l_c >= self.media_min_like)
                            or (self.media_min_like == 0 and l_c <= self.media_max_like)
                            or (self.media_min_like == 0 and self.media_max_like == 0)
                        ):
                            for (
                                blacklisted_user_name,
                                blacklisted_user_id,
                            ) in self.user_blacklist.items():
                                if (
                                    self.media_by_tag[i]["node"]["owner"]["id"]
                                    == blacklisted_user_id
                                ):
                                    self.write_log(
                                        f"Not liking media owned by blacklisted user: {blacklisted_user_name}"
                                    )
                                    return False
                            if (
                                self.media_by_tag[i]["node"]["owner"]["id"]
                                == self.user_id
                            ):
                                self.write_log("Keep calm - It's your own media ;)")
                                return False
                            if (
                                check_already_liked(
                                    self, media_id=self.media_by_tag[i]["node"]["id"]
                                )
                                == 1
                            ):
                                self.write_log("Keep calm - It's already liked ;)")
                                return False
                            try:
                                if (
                                    len(
                                        self.media_by_tag[i]["node"][
                                            "edge_media_to_caption"
                                        ]["edges"]
                                    )
                                    > 1
                                ):
                                    caption = self.media_by_tag[i]["node"][
                                        "edge_media_to_caption"
                                    ]["edges"][0]["node"]["text"].encode(
                                        "ascii", errors="ignore"
                                    )
                                    tag_blacklist = set(self.tag_blacklist)
                                    if sys.version_info[0] == 3:
                                        tags = {
                                            str.lower((tag.decode("ASCII")).strip("#"))
                                            for tag in caption.split()
                                            if (tag.decode("ASCII")).startswith("#")
                                        }
                                    else:
                                        tags = {
                                            unicode.lower(
                                                (tag.decode("ASCII")).strip("#")
                                            )
                                            for tag in caption.split()
                                            if (tag.decode("ASCII")).startswith("#")
                                        }
                                    if tags.intersection(tag_blacklist):
matching_tags = ", ".join( tags.intersection(tag_blacklist) ) self.write_log( f"Not liking media with blacklisted tag(s): {matching_tags}" ) return False except: logging.exception("Except on like_all_exist_media") return False log_string = ( "Trying to like media: %s\n %s" % ( self.media_by_tag[i]["node"]["id"], self.url_media % self.media_by_tag[i]["node"]["shortcode"], ) ) self.write_log(log_string) like = self.like(self.media_by_tag[i]["node"]["id"]) # comment = self.comment(self.media_by_tag[i]['id'], 'Cool!') # follow = self.follow(self.media_by_tag[i]["owner"]["id"]) if like != 0: if like.status_code == 200: # Like, all ok! self.error_400 = 0 self.like_counter += 1 log_string = f"Liked: {self.media_by_tag[i]["node"]["id"]}. Like #{self.like_counter}." insert_media( self, media_id=self.media_by_tag[i]["node"]["id"], status="200", ) self.write_log(log_string) elif like.status_code == 400: self.write_log(f"Not liked: {like.status_code} message {like.text}") insert_media( self, media_id=self.media_by_tag[i]["node"]["id"], status="400", ) # Some error. If repeated - can be ban! if self.error_400 >= self.error_400_to_ban: # Look like you banned! time.sleep(self.ban_sleep_time) else: self.error_400 += 1 else: insert_media( self, media_id=self.media_by_tag[i]["node"]["id"], status=str(like.status_code), ) self.write_log(f"Not liked: {like.status_code} message {like.text}") return False # Some error. 
                            # Move on to the next fetched media item; sleep a
                            # jittered like_delay unless the caller disabled it.
                            i += 1
                            if delay:
                                time.sleep(
                                    self.like_delay * 0.9
                                    + self.like_delay * 0.2 * random.random()
                                )
                            else:
                                return True
                        else:
                            # Like count fell outside the configured
                            # media_min_like/media_max_like window.
                            return False
                    else:
                        # Requested media_size budget exhausted.
                        return False
            else:
                # self.media_by_tag == 0: nothing was fetched for this tag.
                return False
        else:
            self.write_log("No media to like!")

    def like(self, media_id):
        """ Send http request to like media by ID """
        # Returns the requests.Response from the like endpoint, 0 if the
        # HTTP call raised, or None (implicitly) when not logged in.
        if self.login_status:
            url_likes = self.url_likes % (media_id)
            try:
                like = self.s.post(url_likes)
                # NOTE(review): this local is never read again — it looks like
                # it was meant to be an instance attribute; confirm intent.
                last_liked_media_id = media_id
            except:
                logging.exception("Except on like!")
                like = 0
            return like

    def unlike(self, media_id):
        """ Send http request to unlike media by ID """
        # Returns the requests.Response, 0 on exception, or None (implicitly)
        # when not logged in.
        if self.login_status:
            url_unlike = self.url_unlike % (media_id)
            try:
                unlike = self.s.post(url_unlike)
            except:
                logging.exception("Except on unlike!")
                unlike = 0
            return unlike

    def comment(self, media_id, comment_text):
        """ Send http request to comment """
        # Posts comment_text on the given media; bumps comments_counter on
        # HTTP 200. Returns the Response, or False when not logged in or on
        # exception.
        if self.login_status:
            comment_post = {"comment_text": comment_text}
            url_comment = self.url_comment % (media_id)
            try:
                comment = self.s.post(url_comment, data=comment_post)
                if comment.status_code == 200:
                    self.comments_counter += 1
                    log_string = f"Write: {comment_text}. #{self.comments_counter}."
                    self.write_log(log_string)
                return comment
            except:
                logging.exception("Except on comment!")
        return False

    def follow(self, user_id, username=None):
        """ Send http request to follow """
        # On HTTP 200 bumps follow_counter and records the (user_id, username)
        # pair in the local sqlite DB. Returns the Response, or False when not
        # logged in or on exception.
        if self.login_status:
            url_follow = self.url_follow % (user_id)
            if username is None:
                # Resolve the username so the DB row is complete.
                username = self.get_username_by_user_id(user_id=user_id)
            try:
                follow = self.s.post(url_follow)
                if follow.status_code == 200:
                    self.follow_counter += 1
                    log_string = f"Followed: {user_id} #{self.follow_counter}."
                    self.write_log(log_string)
                    insert_username(self, user_id=user_id, username=username)
                return follow
            except:
                logging.exception("Except on follow!")
        return False

    def unfollow(self, user_id):
        """ Send http request to unfollow """
        # On HTTP 200 bumps unfollow_counter and records the unfollow in the
        # local sqlite DB. Returns the Response, or False when not logged in
        # or on exception.
        if self.login_status:
            url_unfollow = self.url_unfollow % (user_id)
            try:
                unfollow = self.s.post(url_unfollow)
                if unfollow.status_code == 200:
                    self.unfollow_counter += 1
                    log_string = f"Unfollowed: {user_id} #{self.unfollow_counter}."
                    self.write_log(log_string)
                    insert_unfollow_count(self, user_id=user_id)
                return unfollow
            except:
                logging.exception("Exept on unfollow!")
        return False

    def unfollow_on_cleanup(self, user_id):
        """ Unfollow on cleanup by @rjmayott """
        # Like unfollow(), but on a non-200 response it backs off for five
        # minutes and retries once before giving up.
        if self.login_status:
            url_unfollow = self.url_unfollow % (user_id)
            try:
                unfollow = self.s.post(url_unfollow)
                if unfollow.status_code == 200:
                    self.unfollow_counter += 1
                    log_string = f"Unfollow: {user_id} #{self.unfollow_counter} of {self.follow_counter}."
                    self.write_log(log_string)
                    insert_unfollow_count(self, user_id=user_id)
                else:
                    # First attempt rejected — pause, then retry once.
                    log_string = (
                        "Slow Down - Pausing for 5 minutes to avoid getting banned"
                    )
                    self.write_log(log_string)
                    time.sleep(300)
                    unfollow = self.s.post(url_unfollow)
                    if unfollow.status_code == 200:
                        self.unfollow_counter += 1
                        log_string = f"Unfollow: {user_id} #{self.unfollow_counter} of {self.follow_counter}."
                        self.write_log(log_string)
                        insert_unfollow_count(self, user_id=user_id)
                    else:
                        # Retry also failed — give up on this user for now.
                        log_string = "Still no good :( Skipping and pausing for another 5 minutes"
                        self.write_log(log_string)
                        time.sleep(300)
                        return False
                return unfollow
            except:
                logging.exception("Except on unfollow.")
        return False

    # Backwards Compatibility for old example.py files
    def auto_mod(self):
        """Deprecated alias for mainloop()."""
        self.mainloop()

    def new_auto_mod(self):
        """Deprecated alias for mainloop()."""
        self.mainloop()

    def mainloop(self):
        """Main bot loop: within the configured time window, fetch media for
        a random tag and run one like/unlike/follow/unfollow/comment pass per
        second; outside the window, idle. Exits the process when prog_run or
        login_status goes False."""
        while self.prog_run and self.login_status:
            now = datetime.datetime.now()
            if datetime.time(
                self.start_at_h, self.start_at_m
            ) <= now.time() and now.time() <= datetime.time(
                self.end_at_h, self.end_at_m
            ):
                # ------------------- Get media_id -------------------
                if len(self.media_by_tag) == 0:
                    self.get_media_id_by_tag(random.choice(self.tag_list))
                    self.this_tag_like_count = 0
                    self.max_tag_like_count = random.randint(
                        1, self.max_like_for_one_tag
                    )
                    self.remove_already_liked()
                # ------------------- Like -------------------
                self.new_auto_mod_like()
                # ------------------- Unlike -------------------
                self.new_auto_mod_unlike()
                # ------------------- Follow -------------------
                self.new_auto_mod_follow()
                # ------------------- Unfollow -------------------
                self.new_auto_mod_unfollow()
                # ------------------- Comment -------------------
                self.new_auto_mod_comments()
                # Bot iteration in 1 sec
                time.sleep(1)
                # print("Tic!")
            else:
                # Outside the allowed window — idle and poll again.
                print(
                    "!!sleeping until {hour}:{min}".format(
                        hour=self.start_at_h, min=self.start_at_m
                    ),
                    end="\r",
                )
                time.sleep(100)
        self.write_log("Exit Program... GoodBye")
        sys.exit(0)

    def remove_already_liked(self):
        """Drop media already liked (per the local DB) from media_by_tag."""
        self.write_log("Removing already liked medias..")
        x = 0
        while x < len(self.media_by_tag):
            if (
                check_already_liked(self, media_id=self.media_by_tag[x]["node"]["id"])
                == 1
            ):
                self.media_by_tag.remove(self.media_by_tag[x])
            else:
                x += 1

    def new_auto_mod_like(self):
        """One iteration of the like schedule: like a single media item when
        the Like timer has elapsed, then reschedule and pop the item."""
        if (
            time.time() > self.next_iteration["Like"]
            and self.like_per_day != 0
            and len(self.media_by_tag) > 0
        ):
            # You have media_id to like:
            if self.like_all_exist_media(media_size=1, delay=False):
                # If like go to sleep:
                self.next_iteration["Like"] = time.time() + self.add_time(
                    self.like_delay
                )
                # Count this tag likes:
                self.this_tag_like_count += 1
                if self.this_tag_like_count >= self.max_tag_like_count:
                    # Tag budget spent: reset so only the [0] placeholder
                    # remains and a new tag is fetched next loop.
                    self.media_by_tag = [0]
            # Del first media_id
            try:
                del self.media_by_tag[0]
            except:
                print("Could not remove media")

    def new_auto_mod_unlike(self):
        """One iteration of the unlike schedule: unlike an old media from the
        DB when the Unlike timer has elapsed."""
        if time.time() > self.next_iteration["Unlike"] and self.unlike_per_day != 0:
            media = get_medias_to_unlike(self)
            if media:
                self.write_log("Trying to unlike media")
                self.auto_unlike()
            # NOTE(review): reschedules with unfollow_delay, not an
            # unlike-specific delay — looks like a copy/paste slip; confirm.
            self.next_iteration["Unlike"] = time.time() + self.add_time(
                self.unfollow_delay
            )

    def new_auto_mod_follow(self):
        """One iteration of the follow schedule: follow the owner of the
        first fetched media, subject to follower-count limits and the
        already-followed DB check."""
        username = None
        if time.time() < self.next_iteration["Follow"]:
            return
        if (
            time.time() > self.next_iteration["Follow"]
            and self.follow_per_day != 0
            and len(self.media_by_tag) > 0
        ):
            if self.media_by_tag[0]["node"]["owner"]["id"] == self.user_id:
                self.write_log("Keep calm - It's your own profile ;)")
                return
            if self.user_min_follow != 0 or self.user_max_follow != 0:
                # Scrape the owner's follower count from the profile page and
                # enforce the min/max follower thresholds; best-effort — any
                # failure falls through to following anyway.
                try:
                    username = self.get_username_by_user_id(
                        self.media_by_tag[0]["node"]["owner"]["id"]
                    )
                    url = self.url_user_detail % (username)
                    r = self.s.get(url)
                    all_data = json.loads(
                        re.search(
                            "window._sharedData = (.*?);</script>", r.text, re.DOTALL
                        ).group(1)
                    )
                    followers = all_data["entry_data"]["ProfilePage"][0]["graphql"][
                        "user"
                    ]["edge_followed_by"]["count"]
                    if followers < self.user_min_follow:
                        self.write_log(
                            f"Won't follow {username}: does not meet user_min_follow requirement"
                        )
                        return
                    if self.user_max_follow != 0 and followers > self.user_max_follow:
                        self.write_log(
                            f"Won't follow {username}: does not meet user_max_follow requirement"
                        )
                        return
                except Exception:
                    pass
            if (
                check_already_followed(
                    self, user_id=self.media_by_tag[0]["node"]["owner"]["id"]
                )
                == 1
            ):
                # Already in the DB — back off for half a follow_delay.
                self.write_log(
                    f"Already followed before {self.media_by_tag[0]["node"]["owner"]["id"]}"
                )
                self.next_iteration["Follow"] = time.time() + self.add_time(
                    self.follow_delay / 2
                )
                return
            log_string = (
                f"Trying to follow: {self.media_by_tag[0]["node"]["owner"]["id"]}"
            )
            self.write_log(log_string)
            self.next_iteration["Follow"] = time.time() + self.add_time(
                self.follow_delay
            )
            if (
                self.follow(
                    user_id=self.media_by_tag[0]["node"]["owner"]["id"],
                    username=username,
                )
                is not False
            ):
                # Remember who we followed (and when) for later cleanup.
                self.bot_follow_list.append(
                    [self.media_by_tag[0]["node"]["owner"]["id"], time.time()]
                )
                self.next_iteration["Follow"] = time.time() + self.add_time(
                    self.follow_delay
                )

    def populate_from_feed(self):
        """Insert users seen on the recent feed into the local DB (used to
        seed the unfollow queue)."""
        self.get_media_id_recent_feed()
        try:
            for mediafeed_user in self.media_on_feed:
                feed_username = mediafeed_user["node"]["owner"]["username"]
                feed_user_id = mediafeed_user["node"]["owner"]["id"]
                # print(check_if_userid_exists(self, userid=feed_user_id))
                if check_if_userid_exists(self, userid=feed_user_id) is False:
                    insert_username(self, user_id=feed_user_id, username=feed_username)
                    self.write_log(f"Inserted user {feed_username} from recent feed")
        except:
            self.write_log("Notice: could not populate from recent feed")

    def new_auto_mod_unfollow(self):
        """One iteration of the unfollow schedule: once the DB holds enough
        usernames, unfollow one and reschedule."""
        if time.time() > self.next_iteration["Unfollow"] and self.unfollow_per_day != 0:
            if (time.time() - self.bot_start_ts) < 30:
                # let bot initialize
                return
            if get_username_row_count(self) < 20:
                self.write_log(
                    f"> Waiting for database to populate before unfollowing (progress {str(get_username_row_count(self))} /20)"
                )
                if self.unfollow_recent_feed is True:
                    self.write_log("Will try to populate using recent feed")
                    self.populate_from_feed()
self.next_iteration["Unfollow"] = time.time() + ( self.add_time(self.unfollow_delay) / 2 ) return # DB doesn't have enough followers yet if self.bot_mode == 0 or self.bot_mode == 3: try: if ( time.time() > self.next_iteration["Populate"] and self.unfollow_recent_feed is True ): self.populate_from_feed() self.next_iteration["Populate"] = time.time() + ( self.add_time(360) ) except: self.write_log( "Notice: Could not populate from recent feed right now" ) log_string = f"Trying to unfollow #{self.unfollow_counter + 1}:" self.write_log(log_string) self.auto_unfollow() self.next_iteration["Unfollow"] = time.time() + self.add_time( self.unfollow_delay ) def new_auto_mod_comments(self): if ( time.time() > self.next_iteration["Comments"] and self.comments_per_day != 0 and len(self.media_by_tag) > 0 and self.check_exisiting_comment(self.media_by_tag[0]["node"]["shortcode"]) is False ): comment_text = self.generate_comment() log_string = f"Trying to comment: {self.media_by_tag[0]["node"]["id"]}" self.write_log(log_string) if ( self.comment(self.media_by_tag[0]["node"]["id"], comment_text) is not False ): self.next_iteration["Comments"] = time.time() + self.add_time( self.comments_delay ) def add_time(self, time): """ Make some random for next iteration""" return time * 0.9 + time * 0.2 * random.random() def generate_comment(self): c_list = list(itertools.product(*self.comment_list)) repl = [(" ", " "), (" .", "."), (" !", "!")] res = " ".join(random.choice(c_list)) for s, r in repl: res = res.replace(s, r) return res.capitalize() def check_exisiting_comment(self, media_code): url_check = self.url_media % (media_code) try: check_comment = self.s.get(url_check) if check_comment.status_code == 200: if "dialog-404" in check_comment.text: self.write_log( f"Tried to comment {media_code} but it doesn't exist (404). Resuming..." 
) del self.media_by_tag[0] return True all_data = json.loads( re.search( "window._sharedData = (.*?);", check_comment.text, re.DOTALL ).group(1) )["entry_data"]["PostPage"][ 0 ] # window._sharedData = (.*?); if ( all_data["graphql"]["shortcode_media"]["owner"]["id"] == self.user_id ): self.write_log("Keep calm - It's your own media ;)") # Del media to don't loop on it del self.media_by_tag[0] return True try: comment_list = list( all_data["graphql"]["shortcode_media"]["edge_media_to_comment"][ "edges" ] ) except: comment_list = list( all_data["graphql"]["shortcode_media"][ "edge_media_to_parent_comment" ]["edges"] ) for d in comment_list: if d["node"]["owner"]["id"] == self.user_id: self.write_log("Keep calm - Media already commented ;)") # Del media to don't loop on it del self.media_by_tag[0] return True return False elif check_comment.status_code == 404: insert_media( self, self.media_by_tag[0]["node"]["id"], str(check_comment.status_code), ) self.write_log( f"Tried to comment {media_code} but it doesn't exist (404). Resuming..." 
) del self.media_by_tag[0] return True else: insert_media( self, self.media_by_tag[0]["node"]["id"], str(check_comment.status_code), ) self.media_by_tag.remove(self.media_by_tag[0]) return True except: self.write_log("Couldn't comment post, resuming.") del self.media_by_tag[0] return True def auto_unlike(self): checking = True while checking: media_to_unlike = get_medias_to_unlike(self) if media_to_unlike: request = self.unlike(media_to_unlike) if request.status_code == 200: update_media_complete(self, media_to_unlike) else: self.write_log("Couldn't unlike media, resuming.") checking = False else: self.write_log("no medias to unlike") checking = False def auto_unfollow(self): checking = True while checking: username_row = get_username_to_unfollow_random(self) if not username_row: self.write_log("Looks like there is nobody to unfollow.") return False current_id = username_row[0] current_user = username_row[1] unfollow_count = username_row[2] if not current_user: current_user = self.get_username_by_user_id(user_id=current_id) if not current_user: log_string = "api limit reached from instagram. Will try later" self.write_log(log_string) return False for wluser in self.unfollow_whitelist: if wluser == current_user: log_string = "found whitelist user, starting search again" self.write_log(log_string) break else: checking = False if self.login_status: log_string = f"Getting user info : {current_user}" self.write_log(log_string) if self.login_status == 1: url_tag = self.url_user_detail % (current_user) try: r = self.s.get(url_tag) if ( r.text.find( "The link you followed may be broken, or the page may have been removed." 
) != -1 ): log_string = ( f"Looks like account was deleted, skipping : {current_user}" ) self.write_log(log_string) insert_unfollow_count(self, user_id=current_id) time.sleep(3) return False all_data = json.loads( re.search( "window._sharedData = (.*?);</script>", r.text, re.DOTALL ).group(1) )["entry_data"]["ProfilePage"][0] user_info = all_data["graphql"]["user"] i = 0 log_string = "Checking user info.." self.write_log(log_string) follows = user_info["edge_follow"]["count"] follower = user_info["edge_followed_by"]["count"] media = user_info["edge_owner_to_timeline_media"]["count"] follow_viewer = user_info["follows_viewer"] followed_by_viewer = user_info["followed_by_viewer"] requested_by_viewer = user_info["requested_by_viewer"] has_requested_viewer = user_info["has_requested_viewer"] log_string = f"Follower : {follower}" self.write_log(log_string) log_string = f"Following : {follows}" self.write_log(log_string) log_string = f"Media : {media}" self.write_log(log_string) if follows == 0 or follower / follows > 2: self.is_selebgram = True self.is_fake_account = False self.write_log(" >>>This is probably Selebgram account") elif follower == 0 or follows / follower > 2: self.is_fake_account = True self.is_selebgram = False self.write_log(" >>>This is probably Fake account") else: self.is_selebgram = False self.is_fake_account = False self.write_log(" >>>This is a normal account") if media > 0 and follows / media < 25 and follower / media < 25: self.is_active_user = True self.write_log(" >>>This user is active") else: self.is_active_user = False self.write_log(" >>>This user is passive") if follow_viewer or has_requested_viewer: self.is_follower = True self.write_log(" >>>This account is following you") else: self.is_follower = False self.write_log(" >>>This account is NOT following you") if followed_by_viewer or requested_by_viewer: self.is_following = True self.write_log(" >>>You are following this account") else: self.is_following = False self.write_log(" >>>You 
are NOT following this account") except: logging.exception("Except on auto_unfollow!") time.sleep(3) return False else: return False if ( self.is_selebgram is not False or self.is_fake_account is not False or self.is_active_user is not True or self.is_follower is not True ): self.write_log(current_user) self.unfollow(current_id) # don't insert unfollow count as it is done now inside unfollow() # insert_unfollow_count(self, user_id=current_id) elif self.is_following is not True: # we are not following this account, hence we unfollowed it, let's keep track insert_unfollow_count(self, user_id=current_id) def unfollow_recent_feed(self): if len(self.media_on_feed) == 0: self.get_media_id_recent_feed() if ( len(self.media_on_feed) != 0 and self.is_follower_number < 5 and time.time() > self.next_iteration["Unfollow"] and self.unfollow_per_day != 0 ): self.get_media_id_recent_feed() chooser = random.randint(0, len(self.media_on_feed) - 1) self.current_user = self.media_on_feed[chooser]["node"]["owner"]["username"] self.current_id = self.media_on_feed[chooser]["node"]["owner"]["id"] current_user = self.current_user current_id = self.current_id if self.login_status: log_string = f"Getting user info : {current_user}" self.write_log(log_string) if self.login_status == 1: url_tag = self.url_user_detail % (current_user) try: r = self.s.get(url_tag) if ( r.text.find( "The link you followed may be broken, or the page may have been removed." ) != -1 ): log_string = ( f"Looks like account was deleted, skipping : {current_user}" ) self.write_log(log_string) insert_unfollow_count(self, user_id=current_id) time.sleep(3) return False all_data = json.loads( re.search( "window._sharedData = (.*?);</script>", r.text, re.DOTALL ).group(1) )["entry_data"]["ProfilePage"][0] user_info = all_data["graphql"]["user"] i = 0 log_string = "Checking user info.." 
self.write_log(log_string) follows = user_info["edge_follow"]["count"] follower = user_info["edge_followed_by"]["count"] media = user_info["edge_owner_to_timeline_media"]["count"] follow_viewer = user_info["follows_viewer"] followed_by_viewer = user_info["followed_by_viewer"] requested_by_viewer = user_info["requested_by_viewer"] has_requested_viewer = user_info["has_requested_viewer"] log_string = f"Follower : {follower}" self.write_log(log_string) log_string = f"Following : {follows}" self.write_log(log_string) log_string = f"Media : {media}" self.write_log(log_string) if follows == 0 or follower / follows > 2: self.is_selebgram = True self.is_fake_account = False self.write_log(" >>>This is probably Selebgram account") elif follower == 0 or follows / follower > 2: self.is_fake_account = True self.is_selebgram = False self.write_log(" >>>This is probably Fake account") else: self.is_selebgram = False self.is_fake_account = False self.write_log(" >>>This is a normal account") if media > 0 and follows / media < 25 and follower / media < 25: self.is_active_user = True self.write_log(" >>>This user is active") else: self.is_active_user = False self.write_log(" >>>This user is passive") if follow_viewer or has_requested_viewer: self.is_follower = True self.write_log(" >>>This account is following you") else: self.is_follower = False self.write_log(" >>>This account is NOT following you") if followed_by_viewer or requested_by_viewer: self.is_following = True self.write_log(" >>>You are following this account") else: self.is_following = False self.write_log(" >>>You are NOT following this account") except: logging.exception("Except on auto_unfollow!") time.sleep(3) return False else: return False if ( self.is_selebgram is not False or self.is_fake_account is not False or self.is_active_user is not True or self.is_follower is not True ): self.write_log(current_user) self.unfollow(current_id) self.next_iteration["Unfollow"] = time.time() + self.add_time( 
self.unfollow_delay ) # don't insert unfollow count as it is done now inside unfollow() # insert_unfollow_count(self, user_id=current_id) elif self.is_following is not True: # we are not following this account, hence we unfollowed it, let's keep track insert_unfollow_count(self, user_id=current_id) time.sleep(8) def get_media_id_recent_feed(self): if self.login_status: now_time = datetime.datetime.now() log_string = f"{self.user_login} : Get media id on recent feed" self.write_log(log_string) if self.login_status == 1: url_tag = "https://www.instagram.com/" try: r = self.s.get(url_tag) jsondata = re.search( "additionalDataLoaded\('feed',({.*})\);", r.text ).group(1) all_data = json.loads(jsondata.strip()) self.media_on_feed = list( all_data["user"]["edge_web_feed_timeline"]["edges"] ) log_string = f"Media in recent feed = {len(self.media_on_feed)}" self.write_log(log_string) except: logging.exception("get_media_id_recent_feed") self.media_on_feed = [] time.sleep(20) return 0 else: return 0 def write_log(self, log_text): """ Write log by print() or logger """ if self.log_mod == 0: try: now_time = datetime.datetime.now() print(f"{now_time.strftime("%d.%m.%Y_%H:%M")} {log_text}") except UnicodeEncodeError: print("Your text has unicode problem!") elif self.log_mod == 1: # Create log_file if not exist. if self.log_file == 0: self.log_file = 1 now_time = datetime.datetime.now() self.log_full_path = "%s%s_%s.log" % ( self.log_file_path, self.user_login, now_time.strftime("%d.%m.%Y_%H:%M"), ) formatter = logging.Formatter("%(asctime)s - %(name)s " "- %(message)s") self.logger = logging.getLogger(self.user_login) self.hdrl = logging.FileHandler(self.log_full_path, mode="w") self.hdrl.setFormatter(formatter) self.logger.setLevel(level=logging.INFO) self.logger.addHandler(self.hdrl) # Log to log file. try: self.logger.info(log_text) except UnicodeEncodeError: print("Your text has unicode problem!")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""instabot: Instagram automation bot (like / follow / unfollow / comment).

Wires the sqlite helpers from ``sql_updates`` to the HTTP session logic of
:class:`InstaBot`.
"""

from __future__ import print_function

from .sql_updates import check_and_insert_user_agent
from .sql_updates import get_username_random, get_username_to_unfollow_random
from .sql_updates import (
    get_usernames_first,
    get_usernames,
    get_username_row_count,
    check_if_userid_exists,
    get_medias_to_unlike,
    update_media_complete,
)
from .sql_updates import insert_media, insert_username, insert_unfollow_count
from .sql_updates import check_already_followed, check_already_unfollowed
from .sql_updates import check_and_update, check_already_liked
import re
import time
import sqlite3
import signal
import random
import logging
import json
import itertools
import datetime
import atexit
import importlib
import os
import sys
import pickle

# Shown when the interpreter is too old to parse the syntax used below.
# (Plain string: the original used an f-string with no placeholders.)
python_version_test = "If you are reading this error, you are not running Python 3.6 or greater. Check 'python --version' or 'python3 --version'."

# Required Dependencies and Modules, offer to install them automatically
# Keep fake_useragent last, quirk for pythonanywhere
required_modules = ["requests", "instaloader", "threading", "fake_useragent"]

for modname in required_modules:
    try:
        # try to import the module normally and put it in globals
        globals()[modname] = importlib.import_module(modname)
    except ImportError:
        # BUGFIX: the original compared with ``is not`` (object identity),
        # which is unreliable for strings and raises a SyntaxWarning on
        # modern CPython; use ``!=`` (equality). fake_useragent stays
        # optional — a runtime fallback handles it.
        if modname != "fake_useragent":
            print(
                f"Failed to load module {modname}. Make sure you have installed correctly dependencies in requirements.txt."
            )
            quit()


class InstaBot:
    """ Instabot.py version 1.2.2 """

    # Local sqlite DB + optional requests session pickle.
    database_name = None
    session_file = None
    follows_db = None
    follows_db_c = None

    # Instagram web endpoints; ``%s`` slots are filled per request.
    url = "https://www.instagram.com/"
    url_tag = "https://www.instagram.com/explore/tags/%s/?__a=1"
    url_location = "https://www.instagram.com/explore/locations/%s/?__a=1"
    url_likes = "https://www.instagram.com/web/likes/%s/like/"
    url_unlike = "https://www.instagram.com/web/likes/%s/unlike/"
    url_comment = "https://www.instagram.com/web/comments/%s/add/"
    url_follow = "https://www.instagram.com/web/friendships/%s/follow/"
    url_unfollow = "https://www.instagram.com/web/friendships/%s/unfollow/"
    url_login = "https://www.instagram.com/accounts/login/ajax/"
    url_logout = "https://www.instagram.com/accounts/logout/"
    url_media_detail = "https://www.instagram.com/p/%s/?__a=1"
    url_media = "https://www.instagram.com/p/%s/"
    url_user_detail = "https://www.instagram.com/%s/"
    api_user_detail = "https://i.instagram.com/api/v1/users/%s/info/"
    instabot_repo_update = (
        "https://github.com/instabot-py/instabot.py/raw/master/version.txt"
    )
    # (was ``"" ""`` — accidental implicit concatenation of two empty strings)
    user_agent = ""
    accept_language = "en-US,en;q=0.5"

    # If instagram ban you - query return 400 error.
    error_400 = 0
    # If you have 3 400 error in row - looks like you banned.
    error_400_to_ban = 3
    # If InstaBot think you are banned - going to sleep.
    ban_sleep_time = 3 * 60 * 60

    # All counter.
    bot_mode = 0
    like_counter = 0
    follow_counter = 0
    unfollow_counter = 0
    comments_counter = 0
    current_user = "hajka"
    current_index = 0
    current_id = "abcds"

    # List of user_id, that bot follow
    # NOTE(review): these mutable class attributes are shared across all
    # instances; fine for the single-bot use case, but confirm before
    # running several InstaBot instances in one process.
    bot_follow_list = []
    user_info_list = []
    user_list = []
    ex_user_list = []
    unwanted_username_list = []

    # Per-profile classification flags, updated while inspecting accounts.
    is_checked = False
    is_selebgram = False
    is_fake_account = False
    is_active_user = False
    is_following = False
    is_follower = False
    is_rejected = False
    is_self_checking = False
    is_by_tag = False
    is_follower_number = 0

    self_following = 0
    self_follower = 0

    # Log setting.
logging.basicConfig(filename="errors.log", level=logging.INFO) log_file_path = "" log_file = 0 # Other. user_id = 0 media_by_tag = 0 media_on_feed = [] media_by_user = [] login_status = False by_location = False # Running Times start_at_h = 0 start_at_m = 0 end_at_h = 23 end_at_m = 59 # For new_auto_mod next_iteration = { "Like": 0, "Unlike": 0, "Follow": 0, "Unfollow": 0, "Comments": 0, "Populate": 0, } prog_run = True def __init__( self, login, password, like_per_day=1000, unlike_per_day=0, media_max_like=150, media_min_like=0, user_max_follow=0, user_min_follow=0, follow_per_day=0, time_till_unlike=3 * 24 * 60 * 60, # Cannot be zero follow_time=5 * 60 * 60, # Cannot be zero follow_time_enabled=True, unfollow_per_day=0, unfollow_recent_feed=True, start_at_h=0, start_at_m=0, end_at_h=23, end_at_m=59, database_name=None, session_file=None, # False = disabled, None = Will use default username.session notation, string = will use that as filename comment_list=[ ["this", "the", "your"], ["photo", "picture", "pic", "shot", "snapshot"], ["is", "looks", "feels", "is really"], [ "great", "super", "good", "very good", "good", "wow", "WOW", "cool", "GREAT", "magnificent", "magical", "very cool", "stylish", "beautiful", "so beautiful", "so stylish", "so professional", "lovely", "so lovely", "very lovely", "glorious", "so glorious", "very glorious", "adorable", "excellent", "amazing", ], [".", "..", "...", "!", "!!", "!!!"], ], comments_per_day=0, tag_list=["cat", "car", "dog"], max_like_for_one_tag=5, unfollow_break_min=15, unfollow_break_max=30, log_mod=0, proxy="", user_blacklist={}, tag_blacklist=[], unwanted_username_list=[], unfollow_whitelist=[], ): if session_file is not None and session_file is not False: self.session_file = session_file elif session_file is False: self.session_file = None else: self.session_file = f"{login.lower()}.session" if database_name is not None: self.database_name = database_name else: self.database_name = f"{login.lower()}.db" 
self.follows_db = sqlite3.connect( self.database_name, timeout=0, isolation_level=None ) self.follows_db_c = self.follows_db.cursor() check_and_update(self) list_of_ua = [ "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FSL 7.0.6.01001)", "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FSL 7.0.7.01001)", "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FSL 7.0.5.01003)", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0", "Mozilla/5.0 (X11; U; Linux x86_64; de; rv:1.9.2.8) Gecko/20100723 Ubuntu/10.04 (lucid) Firefox/3.6.8", "Mozilla/5.0 (Windows NT 5.1; rv:13.0) Gecko/20100101 Firefox/13.0.1", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:11.0) Gecko/20100101 Firefox/11.0", "Mozilla/5.0 (X11; U; Linux x86_64; de; rv:1.9.2.8) Gecko/20100723 Ubuntu/10.04 (lucid) Firefox/3.6.8", "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; .NET CLR 1.0.3705)", "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)", "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)", "Opera/9.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.01", "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727)", "Mozilla/5.0 (Windows NT 5.1; rv:5.0.1) Gecko/20100101 Firefox/5.0.1", "Mozilla/5.0 (Windows NT 6.1; rv:5.0) Gecko/20100101 Firefox/5.02", "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.112 Safari/535.1", "Mozilla/4.0 (compatible; MSIE 6.0; MSIE 5.5; Windows NT 5.0) Opera 7.02 Bork-edition [en]", ] try: fallback = random.sample(list_of_ua, 1) fake_ua = fake_useragent.UserAgent(fallback=fallback[0]) self.user_agent = check_and_insert_user_agent(self, str(fake_ua)) except: fake_ua = random.sample(list_of_ua, 1) self.user_agent = check_and_insert_user_agent(self, str(fake_ua[0])) self.bot_start = datetime.datetime.now() self.bot_start_ts = time.time() self.start_at_h = start_at_h self.start_at_m = start_at_m 
self.end_at_h = end_at_h self.end_at_m = end_at_m self.unfollow_break_min = unfollow_break_min self.unfollow_break_max = unfollow_break_max self.user_blacklist = user_blacklist self.tag_blacklist = tag_blacklist self.unfollow_whitelist = unfollow_whitelist self.comment_list = comment_list self.instaloader = instaloader.Instaloader() self.unfollow_recent_feed = unfollow_recent_feed self.time_in_day = 24 * 60 * 60 # Like self.like_per_day = like_per_day if self.like_per_day != 0: self.like_delay = self.time_in_day / self.like_per_day # Unlike self.time_till_unlike = time_till_unlike self.unlike_per_day = unlike_per_day if self.unlike_per_day != 0: self.unlike_per_day = self.time_in_day / self.unlike_per_day # Follow self.follow_time = follow_time # Cannot be zero self.follow_time_enabled = follow_time_enabled self.follow_per_day = follow_per_day if self.follow_per_day != 0: self.follow_delay = self.time_in_day / self.follow_per_day # Unfollow self.unfollow_per_day = unfollow_per_day if self.unfollow_per_day != 0: self.unfollow_delay = self.time_in_day / self.unfollow_per_day # Comment self.comments_per_day = comments_per_day if self.comments_per_day != 0: self.comments_delay = self.time_in_day / self.comments_per_day # Don't like if media have more than n likes. self.media_max_like = media_max_like # Don't like if media have less than n likes. self.media_min_like = media_min_like # Don't follow if user have more than n followers. self.user_max_follow = user_max_follow # Don't follow if user have less than n followers. self.user_min_follow = user_min_follow # Auto mod seting: # Default list of tag. self.tag_list = tag_list # Get random tag, from tag_list, and like (1 to n) times. 
self.max_like_for_one_tag = max_like_for_one_tag # log_mod 0 to console, 1 to file self.log_mod = log_mod self.s = requests.Session() self.c = requests.Session() # if you need proxy make something like this: # self.s.proxies = {"https" : "http://proxyip:proxyport"} # by @ageorgios if proxy != "": proxies = {"http": f"http://{proxy}", "https": f"http://{proxy}"} self.s.proxies.update(proxies) self.c.proxies.update(proxies) # convert login to lower self.user_login = login.lower() self.user_password = password self.bot_mode = 0 self.media_by_tag = [] self.media_on_feed = [] self.media_by_user = [] self.current_user_info = "" self.unwanted_username_list = unwanted_username_list now_time = datetime.datetime.now() self.check_for_bot_update() log_string = "Instabot v1.2.2/0 started at %s:" % ( now_time.strftime("%d.%m.%Y %H:%M") ) self.write_log(log_string) self.login() self.populate_user_blacklist() signal.signal(signal.SIGINT, self.cleanup) signal.signal(signal.SIGTERM, self.cleanup) atexit.register(self.cleanup) self.instaload = instaloader.Instaloader() def check_for_bot_update(self): self.write_log("Checking for updates...") try: # CHANGE THIS TO OFFICIAL REPO IF KEPT updated_timestamp = self.c.get(self.instabot_repo_update) current_version_timestamp = open("version.txt", "r") if int(updated_timestamp.text) > int(current_version_timestamp.read()): self.write_log( ">>> UPDATE AVAILABLE <<< Please update Instabot. You are running an older version." 
) else: self.write_log("You are running the latest stable version") except: self.write_log("Could not check for updates") def get_user_id_by_username(self, user_name): url_info = self.url_user_detail % (user_name) info = self.s.get(url_info) json_info = json.loads( re.search( "window._sharedData = (.*?);</script>", info.text, re.DOTALL ).group(1) ) id_user = json_info["entry_data"]["ProfilePage"][0]["graphql"]["user"]["id"] return id_user def populate_user_blacklist(self): for user in self.user_blacklist: user_id_url = self.url_user_detail % (user) info = self.s.get(user_id_url) # prevent error if 'Account of user was deleted or link is invalid from json import JSONDecodeError try: all_data = json.loads( re.search( "window._sharedData = (.*?);</script>", info.text, re.DOTALL ).group(1) ) except JSONDecodeError as e: self.write_log( f"Account of user {user} was deleted or link is " "invalid" ) else: # prevent exception if user have no media id_user = all_data["entry_data"]["ProfilePage"][0]["graphql"]["user"][ "id" ] # Update the user_name with the user_id self.user_blacklist[user] = id_user self.write_log(f"Blacklisted user {user} added with ID: {id_user}") time.sleep(5 * random.random()) def login(self): successfulLogin = False self.s.headers.update( { "Accept": "*/*", "Accept-Language": self.accept_language, "Accept-Encoding": "gzip, deflate, br", "Connection": "keep-alive", "Host": "www.instagram.com", "Origin": "https://www.instagram.com", "Referer": "https://www.instagram.com/", "User-Agent": self.user_agent, "X-Instagram-AJAX": "1", "Content-Type": "application/x-www-form-urlencoded", "X-Requested-With": "XMLHttpRequest", } ) if self.session_file is not None and os.path.isfile(self.session_file): self.write_log(f"Found session file {self.session_file}") successfulLogin = True with open(self.session_file, "rb") as i: cookies = pickle.load(i) self.s.cookies.update(cookies) else: self.write_log("Trying to login as {}...".format(self.user_login)) self.login_post 
= { "username": self.user_login, "password": self.user_password, } r = self.s.get(self.url) csrf_token = re.search('(?<="csrf_token":")\w+', r.text).group(0) self.s.headers.update({"X-CSRFToken": csrf_token}) time.sleep(5 * random.random()) login = self.s.post( self.url_login, data=self.login_post, allow_redirects=True ) if ( login.status_code != 200 and login.status_code != 400 ): # Handling Other Status Codes and making debug easier!! self.write_log("Request didn't return 200 as status code!") self.write_log("Here is more info for debbugin or creating an issue") print("=" * 15) print("Response Status: ", login.status_code) print("=" * 15) print("Response Content:\n", login.text) print("=" * 15) print("Response Header:\n", login.headers) print("=" * 15) return loginResponse = login.json() try: self.csrftoken = login.cookies["csrftoken"] self.s.headers.update({"X-CSRFToken": login.cookies["csrftoken"]}) except Exception as e: self.write_log("Something wrong with login") self.write_log(login.text) if loginResponse.get("errors"): self.write_log( "Something is wrong with Instagram! Please try again later..." 
) for error in loginResponse["errors"]["error"]: self.write_log(f"Error =>{error}") return if loginResponse.get("message") == "checkpoint_required": try: if "instagram.com" in loginResponse["checkpoint_url"]: challenge_url = loginResponse["checkpoint_url"] else: challenge_url = ( f"https://instagram.com{loginResponse['checkpoint_url']}" ) self.write_log(f"Challenge required at {challenge_url}") with self.s as clg: clg.headers.update( { "Accept": "*/*", "Accept-Language": self.accept_language, "Accept-Encoding": "gzip, deflate, br", "Connection": "keep-alive", "Host": "www.instagram.com", "Origin": "https://www.instagram.com", "User-Agent": self.user_agent, "X-Instagram-AJAX": "1", "Content-Type": "application/x-www-form-urlencoded", "x-requested-with": "XMLHttpRequest", } ) # Get challenge page challenge_request_explore = clg.get(challenge_url) # Get CSRF Token from challenge page challenge_csrf_token = re.search( '(?<="csrf_token":")\w+', challenge_request_explore.text ).group(0) # Get Rollout Hash from challenge page rollout_hash = re.search( '(?<="rollout_hash":")\w+', challenge_request_explore.text ).group(0) # Ask for option 1 from challenge, which is usually Email or Phone challenge_post = {"choice": 1} # Update headers for challenge submit page clg.headers.update({"X-CSRFToken": challenge_csrf_token}) clg.headers.update({"Referer": challenge_url}) # Request instagram to send a code challenge_request_code = clg.post( challenge_url, data=challenge_post, allow_redirects=True ) # User should receive a code soon, ask for it challenge_userinput_code = input( "Challenge Required.\n\nEnter the code sent to your mail/phone: " ) challenge_security_post = { "security_code": int(challenge_userinput_code) } complete_challenge = clg.post( challenge_url, data=challenge_security_post, allow_redirects=True, ) if complete_challenge.status_code != 200: self.write_log("Entered code is wrong, Try again later!") return self.csrftoken = complete_challenge.cookies["csrftoken"] 
self.s.headers.update( {"X-CSRFToken": self.csrftoken, "X-Instagram-AJAX": "1"} ) successfulLogin = complete_challenge.status_code == 200 except Exception as err: print(f"Login failed, response: \n\n{login.text} {err}") quit() elif loginResponse.get("authenticated") is False: self.write_log("Login error! Check your login data!") return else: rollout_hash = re.search('(?<="rollout_hash":")\w+', r.text).group(0) self.s.headers.update({"X-Instagram-AJAX": rollout_hash}) successfulLogin = True # ig_vw=1536; ig_pr=1.25; ig_vh=772; ig_or=landscape-primary; self.s.cookies["csrftoken"] = self.csrftoken self.s.cookies["ig_vw"] = "1536" self.s.cookies["ig_pr"] = "1.25" self.s.cookies["ig_vh"] = "772" self.s.cookies["ig_or"] = "landscape-primary" time.sleep(5 * random.random()) if successfulLogin: r = self.s.get("https://www.instagram.com/") self.csrftoken = re.search('(?<="csrf_token":")\w+', r.text).group(0) self.s.cookies["csrftoken"] = self.csrftoken self.s.headers.update({"X-CSRFToken": self.csrftoken}) finder = r.text.find(self.user_login) if finder != -1: self.user_id = self.get_user_id_by_username(self.user_login) self.login_status = True self.write_log(f"{self.user_login} login success!\n") if self.session_file is not None: self.write_log( f"Saving cookies to session file {self.session_file}" ) with open(self.session_file, "wb") as output: pickle.dump(self.s.cookies, output, pickle.HIGHEST_PROTOCOL) else: self.login_status = False self.write_log("Login error! Check your login data!") if self.session_file is not None and os.path.isfile(self.session_file): try: os.remove(self.session_file) except: self.write_log( "Could not delete session file. Please delete manually" ) self.prog_run = False else: self.write_log("Login error! Connection error!") def logout(self): now_time = datetime.datetime.now() log_string = ( "Logout: likes - %i, follow - %i, unfollow - %i, comments - %i." 
% ( self.like_counter, self.follow_counter, self.unfollow_counter, self.comments_counter, ) ) self.write_log(log_string) work_time = datetime.datetime.now() - self.bot_start self.write_log(f"Bot work time: {work_time}") try: logout_post = {"csrfmiddlewaretoken": self.csrftoken} logout = self.s.post(self.url_logout, data=logout_post) self.write_log("Logout success!") self.login_status = False except: logging.exception("Logout error!") def cleanup(self, *_): # Unfollow all bot follow if self.follow_counter >= self.unfollow_counter: for i in range(len(self.bot_follow_list)): f = self.bot_follow_list[0] if check_already_unfollowed(self, f[0]): log_string = "Already unfollowed before, skipping: %s" % (f[0]) self.write_log(log_string) else: log_string = "Trying to unfollow: %s" % (f[0]) self.write_log(log_string) self.unfollow_on_cleanup(f[0]) sleeptime = random.randint( self.unfollow_break_min, self.unfollow_break_max ) log_string = "Pausing for %i seconds... %i of %i" % ( sleeptime, self.unfollow_counter, self.follow_counter, ) self.write_log(log_string) time.sleep(sleeptime) self.bot_follow_list.remove(f) # Logout if self.login_status and self.session_file is None: self.logout() self.prog_run = False def get_media_id_by_tag(self, tag): """ Get media ID set, by your hashtag or location """ if self.login_status: if tag.startswith("l:"): tag = tag.replace("l:", "") self.by_location = True self.write_log(f"Get Media by location: {tag}") if self.login_status == 1: url_location = self.url_location % (tag) try: r = self.s.get(url_location) all_data = json.loads(r.text) self.media_by_tag = list( all_data["graphql"]["location"]["edge_location_to_media"][ "edges" ] ) except: self.media_by_tag = [] self.write_log("Except on get_media!") logging.exception("get_media_id_by_tag") else: return 0 else: self.by_location = False self.write_log(f"Get Media by tag: {tag}") if self.login_status == 1: url_tag = self.url_tag % (tag) try: r = self.s.get(url_tag) all_data = json.loads(r.text) 
self.media_by_tag = list( all_data["graphql"]["hashtag"]["edge_hashtag_to_media"][ "edges" ] ) except: self.media_by_tag = [] self.write_log("Except on get_media!") logging.exception("get_media_id_by_tag") else: return 0 def get_instagram_url_from_media_id(self, media_id, url_flag=True, only_code=None): """ Get Media Code or Full Url from Media ID Thanks to Nikished """ media_id = int(media_id) if url_flag is False: return "" else: alphabet = ( "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_" ) shortened_id = "" while media_id > 0: media_id, idx = divmod(media_id, 64) shortened_id = alphabet[idx] + shortened_id if only_code: return shortened_id else: return f"instagram.com/p/{shortened_id}/" def get_username_by_media_id(self, media_id): """ Get username by media ID Thanks to Nikished """ if self.login_status: if self.login_status == 1: media_id_url = self.get_instagram_url_from_media_id( int(media_id), only_code=True ) url_media = self.url_media_detail % (media_id_url) try: r = self.s.get(url_media) all_data = json.loads(r.text) username = str( all_data["graphql"]["shortcode_media"]["owner"]["username"] ) self.write_log( "media_id=" + media_id + ", media_id_url=" + media_id_url + ", username_by_media_id=" + username ) return username except: logging.exception("username_by_mediaid exception") return False else: return "" def get_username_by_user_id(self, user_id): if self.login_status: try: profile = instaloader.Profile.from_id(self.instaload.context, user_id) username = profile.username return username except: logging.exception("Except on get_username_by_user_id") return False else: return False def get_userinfo_by_name(self, username): """ Get user info by name """ if self.login_status: if self.login_status == 1: url_info = self.url_user_detail % (username) try: r = self.s.get(url_info) all_data = json.loads(r.text) user_info = all_data["user"] follows = user_info["follows"]["count"] follower = user_info["followed_by"]["count"] follow_viewer = 
user_info["follows_viewer"] if follower > 3000 or follows > 1500: self.write_log( " >>>This is probably Selebgram, Business or Fake account" ) if follow_viewer: return None return user_info except: logging.exception("Except on get_userinfo_by_name") return False else: return False def like_all_exist_media(self, media_size=-1, delay=True): """ Like all media ID that have self.media_by_tag """ if self.login_status: if self.media_by_tag != 0: i = 0 for d in self.media_by_tag: # Media count by this tag. if media_size > 0 or media_size < 0: media_size -= 1 l_c = self.media_by_tag[i]["node"]["edge_liked_by"]["count"] if ( (l_c <= self.media_max_like and l_c >= self.media_min_like) or (self.media_max_like == 0 and l_c >= self.media_min_like) or (self.media_min_like == 0 and l_c <= self.media_max_like) or (self.media_min_like == 0 and self.media_max_like == 0) ): for ( blacklisted_user_name, blacklisted_user_id, ) in self.user_blacklist.items(): if ( self.media_by_tag[i]["node"]["owner"]["id"] == blacklisted_user_id ): self.write_log( f"Not liking media owned by blacklisted user: {blacklisted_user_name}" ) return False if ( self.media_by_tag[i]["node"]["owner"]["id"] == self.user_id ): self.write_log("Keep calm - It's your own media ;)") return False if ( check_already_liked( self, media_id=self.media_by_tag[i]["node"]["id"] ) == 1 ): self.write_log("Keep calm - It's already liked ;)") return False try: if ( len( self.media_by_tag[i]["node"][ "edge_media_to_caption" ]["edges"] ) > 1 ): caption = self.media_by_tag[i]["node"][ "edge_media_to_caption" ]["edges"][0]["node"]["text"].encode( "ascii", errors="ignore" ) tag_blacklist = set(self.tag_blacklist) if sys.version_info[0] == 3: tags = { str.lower((tag.decode("ASCII")).strip("#")) for tag in caption.split() if (tag.decode("ASCII")).startswith("#") } else: tags = { unicode.lower( (tag.decode("ASCII")).strip("#") ) for tag in caption.split() if (tag.decode("ASCII")).startswith("#") } if tags.intersection(tag_blacklist): 
matching_tags = ", ".join( tags.intersection(tag_blacklist) ) self.write_log( f"Not liking media with blacklisted tag(s): {matching_tags}" ) return False except: logging.exception("Except on like_all_exist_media") return False log_string = ( "Trying to like media: %s\n %s" % ( self.media_by_tag[i]["node"]["id"], self.url_media % self.media_by_tag[i]["node"]["shortcode"], ) ) self.write_log(log_string) like = self.like(self.media_by_tag[i]["node"]["id"]) # comment = self.comment(self.media_by_tag[i]['id'], 'Cool!') # follow = self.follow(self.media_by_tag[i]["owner"]["id"]) if like != 0: if like.status_code == 200: # Like, all ok! self.error_400 = 0 self.like_counter += 1 log_string = f"Liked: {self.media_by_tag[i]['node']['id']}. Like #{self.like_counter}." insert_media( self, media_id=self.media_by_tag[i]["node"]["id"], status="200", ) self.write_log(log_string) elif like.status_code == 400: self.write_log(f"Not liked: {like.status_code} message {like.text}") insert_media( self, media_id=self.media_by_tag[i]["node"]["id"], status="400", ) # Some error. If repeated - can be ban! if self.error_400 >= self.error_400_to_ban: # Look like you banned! time.sleep(self.ban_sleep_time) else: self.error_400 += 1 else: insert_media( self, media_id=self.media_by_tag[i]["node"]["id"], status=str(like.status_code), ) self.write_log(f"Not liked: {like.status_code} message {like.text}") return False # Some error. 
i += 1 if delay: time.sleep( self.like_delay * 0.9 + self.like_delay * 0.2 * random.random() ) else: return True else: return False else: return False else: return False else: self.write_log("No media to like!") def like(self, media_id): """ Send http request to like media by ID """ if self.login_status: url_likes = self.url_likes % (media_id) try: like = self.s.post(url_likes) last_liked_media_id = media_id except: logging.exception("Except on like!") like = 0 return like def unlike(self, media_id): """ Send http request to unlike media by ID """ if self.login_status: url_unlike = self.url_unlike % (media_id) try: unlike = self.s.post(url_unlike) except: logging.exception("Except on unlike!") unlike = 0 return unlike def comment(self, media_id, comment_text): """ Send http request to comment """ if self.login_status: comment_post = {"comment_text": comment_text} url_comment = self.url_comment % (media_id) try: comment = self.s.post(url_comment, data=comment_post) if comment.status_code == 200: self.comments_counter += 1 log_string = f"Write: {comment_text}. #{self.comments_counter}." self.write_log(log_string) return comment except: logging.exception("Except on comment!") return False def follow(self, user_id, username=None): """ Send http request to follow """ if self.login_status: url_follow = self.url_follow % (user_id) if username is None: username = self.get_username_by_user_id(user_id=user_id) try: follow = self.s.post(url_follow) if follow.status_code == 200: self.follow_counter += 1 log_string = f"Followed: {user_id} #{self.follow_counter}." 
self.write_log(log_string) insert_username(self, user_id=user_id, username=username) return follow except: logging.exception("Except on follow!") return False def unfollow(self, user_id): """ Send http request to unfollow """ if self.login_status: url_unfollow = self.url_unfollow % (user_id) try: unfollow = self.s.post(url_unfollow) if unfollow.status_code == 200: self.unfollow_counter += 1 log_string = f"Unfollowed: {user_id} #{self.unfollow_counter}." self.write_log(log_string) insert_unfollow_count(self, user_id=user_id) return unfollow except: logging.exception("Exept on unfollow!") return False def unfollow_on_cleanup(self, user_id): """ Unfollow on cleanup by @rjmayott """ if self.login_status: url_unfollow = self.url_unfollow % (user_id) try: unfollow = self.s.post(url_unfollow) if unfollow.status_code == 200: self.unfollow_counter += 1 log_string = f"Unfollow: {user_id} #{self.unfollow_counter} of {self.follow_counter}." self.write_log(log_string) insert_unfollow_count(self, user_id=user_id) else: log_string = ( "Slow Down - Pausing for 5 minutes to avoid getting banned" ) self.write_log(log_string) time.sleep(300) unfollow = self.s.post(url_unfollow) if unfollow.status_code == 200: self.unfollow_counter += 1 log_string = f"Unfollow: {user_id} #{self.unfollow_counter} of {self.follow_counter}." 
self.write_log(log_string) insert_unfollow_count(self, user_id=user_id) else: log_string = "Still no good :( Skipping and pausing for another 5 minutes" self.write_log(log_string) time.sleep(300) return False return unfollow except: logging.exception("Except on unfollow.") return False # Backwards Compatibility for old example.py files def auto_mod(self): self.mainloop() def new_auto_mod(self): self.mainloop() def mainloop(self): while self.prog_run and self.login_status: now = datetime.datetime.now() if datetime.time( self.start_at_h, self.start_at_m ) <= now.time() and now.time() <= datetime.time( self.end_at_h, self.end_at_m ): # ------------------- Get media_id ------------------- if len(self.media_by_tag) == 0: self.get_media_id_by_tag(random.choice(self.tag_list)) self.this_tag_like_count = 0 self.max_tag_like_count = random.randint( 1, self.max_like_for_one_tag ) self.remove_already_liked() # ------------------- Like ------------------- self.new_auto_mod_like() # ------------------- Unlike ------------------- self.new_auto_mod_unlike() # ------------------- Follow ------------------- self.new_auto_mod_follow() # ------------------- Unfollow ------------------- self.new_auto_mod_unfollow() # ------------------- Comment ------------------- self.new_auto_mod_comments() # Bot iteration in 1 sec time.sleep(1) # print("Tic!") else: print( "!!sleeping until {hour}:{min}".format( hour=self.start_at_h, min=self.start_at_m ), end="\r", ) time.sleep(100) self.write_log("Exit Program... 
GoodBye") sys.exit(0) def remove_already_liked(self): self.write_log("Removing already liked medias..") x = 0 while x < len(self.media_by_tag): if ( check_already_liked(self, media_id=self.media_by_tag[x]["node"]["id"]) == 1 ): self.media_by_tag.remove(self.media_by_tag[x]) else: x += 1 def new_auto_mod_like(self): if ( time.time() > self.next_iteration["Like"] and self.like_per_day != 0 and len(self.media_by_tag) > 0 ): # You have media_id to like: if self.like_all_exist_media(media_size=1, delay=False): # If like go to sleep: self.next_iteration["Like"] = time.time() + self.add_time( self.like_delay ) # Count this tag likes: self.this_tag_like_count += 1 if self.this_tag_like_count >= self.max_tag_like_count: self.media_by_tag = [0] # Del first media_id try: del self.media_by_tag[0] except: print("Could not remove media") def new_auto_mod_unlike(self): if time.time() > self.next_iteration["Unlike"] and self.unlike_per_day != 0: media = get_medias_to_unlike(self) if media: self.write_log("Trying to unlike media") self.auto_unlike() self.next_iteration["Unlike"] = time.time() + self.add_time( self.unfollow_delay ) def new_auto_mod_follow(self): username = None if time.time() < self.next_iteration["Follow"]: return if ( time.time() > self.next_iteration["Follow"] and self.follow_per_day != 0 and len(self.media_by_tag) > 0 ): if self.media_by_tag[0]["node"]["owner"]["id"] == self.user_id: self.write_log("Keep calm - It's your own profile ;)") return if self.user_min_follow != 0 or self.user_max_follow != 0: try: username = self.get_username_by_user_id( self.media_by_tag[0]["node"]["owner"]["id"] ) url = self.url_user_detail % (username) r = self.s.get(url) all_data = json.loads( re.search( "window._sharedData = (.*?);</script>", r.text, re.DOTALL ).group(1) ) followers = all_data["entry_data"]["ProfilePage"][0]["graphql"][ "user" ]["edge_followed_by"]["count"] if followers < self.user_min_follow: self.write_log( f"Won't follow {username}: does not meet 
user_min_follow requirement" ) return if self.user_max_follow != 0 and followers > self.user_max_follow: self.write_log( f"Won't follow {username}: does not meet user_max_follow requirement" ) return except Exception: pass if ( check_already_followed( self, user_id=self.media_by_tag[0]["node"]["owner"]["id"] ) == 1 ): self.write_log( f"Already followed before {self.media_by_tag[0]['node']['owner']['id']}" ) self.next_iteration["Follow"] = time.time() + self.add_time( self.follow_delay / 2 ) return log_string = ( f"Trying to follow: {self.media_by_tag[0]['node']['owner']['id']}" ) self.write_log(log_string) self.next_iteration["Follow"] = time.time() + self.add_time( self.follow_delay ) if ( self.follow( user_id=self.media_by_tag[0]["node"]["owner"]["id"], username=username, ) is not False ): self.bot_follow_list.append( [self.media_by_tag[0]["node"]["owner"]["id"], time.time()] ) self.next_iteration["Follow"] = time.time() + self.add_time( self.follow_delay ) def populate_from_feed(self): self.get_media_id_recent_feed() try: for mediafeed_user in self.media_on_feed: feed_username = mediafeed_user["node"]["owner"]["username"] feed_user_id = mediafeed_user["node"]["owner"]["id"] # print(check_if_userid_exists(self, userid=feed_user_id)) if check_if_userid_exists(self, userid=feed_user_id) is False: insert_username(self, user_id=feed_user_id, username=feed_username) self.write_log(f"Inserted user {feed_username} from recent feed") except: self.write_log("Notice: could not populate from recent feed") def new_auto_mod_unfollow(self): if time.time() > self.next_iteration["Unfollow"] and self.unfollow_per_day != 0: if (time.time() - self.bot_start_ts) < 30: # let bot initialize return if get_username_row_count(self) < 20: self.write_log( f"> Waiting for database to populate before unfollowing (progress {str(get_username_row_count(self))} /20)" ) if self.unfollow_recent_feed is True: self.write_log("Will try to populate using recent feed") self.populate_from_feed() 
                self.next_iteration["Unfollow"] = time.time() + (
                    self.add_time(self.unfollow_delay) / 2
                )
                return  # DB doesn't have enough followers yet
            if self.bot_mode == 0 or self.bot_mode == 3:
                # Periodically (roughly every 360 s, jittered by add_time)
                # refresh the username DB from the recent feed so the
                # unfollow pass has candidates to draw from.
                try:
                    if (
                        time.time() > self.next_iteration["Populate"]
                        and self.unfollow_recent_feed is True
                    ):
                        self.populate_from_feed()
                        self.next_iteration["Populate"] = time.time() + (
                            self.add_time(360)
                        )
                except:
                    self.write_log(
                        "Notice: Could not populate from recent feed right now"
                    )
            log_string = f"Trying to unfollow #{self.unfollow_counter + 1}:"
            self.write_log(log_string)
            self.auto_unfollow()
            self.next_iteration["Unfollow"] = time.time() + self.add_time(
                self.unfollow_delay
            )

    def new_auto_mod_comments(self):
        # Comment on the queued media when the Comments timer has elapsed,
        # commenting is enabled, media is queued, and we have not already
        # commented on that media (check_exisiting_comment also skips own
        # posts and dead links).
        if (
            time.time() > self.next_iteration["Comments"]
            and self.comments_per_day != 0
            and len(self.media_by_tag) > 0
            and self.check_exisiting_comment(self.media_by_tag[0]["node"]["shortcode"])
            is False
        ):
            comment_text = self.generate_comment()
            log_string = f"Trying to comment: {self.media_by_tag[0]['node']['id']}"
            self.write_log(log_string)
            if (
                self.comment(self.media_by_tag[0]["node"]["id"], comment_text)
                is not False
            ):
                self.next_iteration["Comments"] = time.time() + self.add_time(
                    self.comments_delay
                )

    def add_time(self, time):
        """Return *time* scaled by a random factor in [0.9, 1.1).

        Jitters the next-iteration timestamps so actions are not perfectly
        periodic.  NOTE(review): the parameter shadows the ``time`` module
        inside this method -- harmless here, but easy to trip over when
        editing this body.
        """
        return time * 0.9 + time * 0.2 * random.random()

    def generate_comment(self):
        # Build one random comment: pick one word from each sub-list of
        # self.comment_list, join them, then normalise spacing/punctuation
        # via the repl substitution pairs.
        c_list = list(itertools.product(*self.comment_list))
        repl = [(" ", " "), (" .", "."), (" !", "!")]
        res = " ".join(random.choice(c_list))
        for s, r in repl:
            res = res.replace(s, r)
        return res.capitalize()

    def check_exisiting_comment(self, media_code):
        # Fetch the media page and decide whether commenting must be skipped:
        # returns True for own media / already-commented / deleted media
        # (dequeuing the media as a side effect), False when a comment is OK.
        url_check = self.url_media % (media_code)
        try:
            check_comment = self.s.get(url_check)
            if check_comment.status_code == 200:
                if "dialog-404" in check_comment.text:
                    self.write_log(
                        f"Tried to comment {media_code} but it doesn't exist (404). Resuming..."
) del self.media_by_tag[0] return True all_data = json.loads( re.search( "window._sharedData = (.*?);", check_comment.text, re.DOTALL ).group(1) )["entry_data"]["PostPage"][ 0 ] # window._sharedData = (.*?); if ( all_data["graphql"]["shortcode_media"]["owner"]["id"] == self.user_id ): self.write_log("Keep calm - It's your own media ;)") # Del media to don't loop on it del self.media_by_tag[0] return True try: comment_list = list( all_data["graphql"]["shortcode_media"]["edge_media_to_comment"][ "edges" ] ) except: comment_list = list( all_data["graphql"]["shortcode_media"][ "edge_media_to_parent_comment" ]["edges"] ) for d in comment_list: if d["node"]["owner"]["id"] == self.user_id: self.write_log("Keep calm - Media already commented ;)") # Del media to don't loop on it del self.media_by_tag[0] return True return False elif check_comment.status_code == 404: insert_media( self, self.media_by_tag[0]["node"]["id"], str(check_comment.status_code), ) self.write_log( f"Tried to comment {media_code} but it doesn't exist (404). Resuming..." 
) del self.media_by_tag[0] return True else: insert_media( self, self.media_by_tag[0]["node"]["id"], str(check_comment.status_code), ) self.media_by_tag.remove(self.media_by_tag[0]) return True except: self.write_log("Couldn't comment post, resuming.") del self.media_by_tag[0] return True def auto_unlike(self): checking = True while checking: media_to_unlike = get_medias_to_unlike(self) if media_to_unlike: request = self.unlike(media_to_unlike) if request.status_code == 200: update_media_complete(self, media_to_unlike) else: self.write_log("Couldn't unlike media, resuming.") checking = False else: self.write_log("no medias to unlike") checking = False def auto_unfollow(self): checking = True while checking: username_row = get_username_to_unfollow_random(self) if not username_row: self.write_log("Looks like there is nobody to unfollow.") return False current_id = username_row[0] current_user = username_row[1] unfollow_count = username_row[2] if not current_user: current_user = self.get_username_by_user_id(user_id=current_id) if not current_user: log_string = "api limit reached from instagram. Will try later" self.write_log(log_string) return False for wluser in self.unfollow_whitelist: if wluser == current_user: log_string = "found whitelist user, starting search again" self.write_log(log_string) break else: checking = False if self.login_status: log_string = f"Getting user info : {current_user}" self.write_log(log_string) if self.login_status == 1: url_tag = self.url_user_detail % (current_user) try: r = self.s.get(url_tag) if ( r.text.find( "The link you followed may be broken, or the page may have been removed." 
) != -1 ): log_string = ( f"Looks like account was deleted, skipping : {current_user}" ) self.write_log(log_string) insert_unfollow_count(self, user_id=current_id) time.sleep(3) return False all_data = json.loads( re.search( "window._sharedData = (.*?);</script>", r.text, re.DOTALL ).group(1) )["entry_data"]["ProfilePage"][0] user_info = all_data["graphql"]["user"] i = 0 log_string = "Checking user info.." self.write_log(log_string) follows = user_info["edge_follow"]["count"] follower = user_info["edge_followed_by"]["count"] media = user_info["edge_owner_to_timeline_media"]["count"] follow_viewer = user_info["follows_viewer"] followed_by_viewer = user_info["followed_by_viewer"] requested_by_viewer = user_info["requested_by_viewer"] has_requested_viewer = user_info["has_requested_viewer"] log_string = f"Follower : {follower}" self.write_log(log_string) log_string = f"Following : {follows}" self.write_log(log_string) log_string = f"Media : {media}" self.write_log(log_string) if follows == 0 or follower / follows > 2: self.is_selebgram = True self.is_fake_account = False self.write_log(" >>>This is probably Selebgram account") elif follower == 0 or follows / follower > 2: self.is_fake_account = True self.is_selebgram = False self.write_log(" >>>This is probably Fake account") else: self.is_selebgram = False self.is_fake_account = False self.write_log(" >>>This is a normal account") if media > 0 and follows / media < 25 and follower / media < 25: self.is_active_user = True self.write_log(" >>>This user is active") else: self.is_active_user = False self.write_log(" >>>This user is passive") if follow_viewer or has_requested_viewer: self.is_follower = True self.write_log(" >>>This account is following you") else: self.is_follower = False self.write_log(" >>>This account is NOT following you") if followed_by_viewer or requested_by_viewer: self.is_following = True self.write_log(" >>>You are following this account") else: self.is_following = False self.write_log(" >>>You 
are NOT following this account") except: logging.exception("Except on auto_unfollow!") time.sleep(3) return False else: return False if ( self.is_selebgram is not False or self.is_fake_account is not False or self.is_active_user is not True or self.is_follower is not True ): self.write_log(current_user) self.unfollow(current_id) # don't insert unfollow count as it is done now inside unfollow() # insert_unfollow_count(self, user_id=current_id) elif self.is_following is not True: # we are not following this account, hence we unfollowed it, let's keep track insert_unfollow_count(self, user_id=current_id) def unfollow_recent_feed(self): if len(self.media_on_feed) == 0: self.get_media_id_recent_feed() if ( len(self.media_on_feed) != 0 and self.is_follower_number < 5 and time.time() > self.next_iteration["Unfollow"] and self.unfollow_per_day != 0 ): self.get_media_id_recent_feed() chooser = random.randint(0, len(self.media_on_feed) - 1) self.current_user = self.media_on_feed[chooser]["node"]["owner"]["username"] self.current_id = self.media_on_feed[chooser]["node"]["owner"]["id"] current_user = self.current_user current_id = self.current_id if self.login_status: log_string = f"Getting user info : {current_user}" self.write_log(log_string) if self.login_status == 1: url_tag = self.url_user_detail % (current_user) try: r = self.s.get(url_tag) if ( r.text.find( "The link you followed may be broken, or the page may have been removed." ) != -1 ): log_string = ( f"Looks like account was deleted, skipping : {current_user}" ) self.write_log(log_string) insert_unfollow_count(self, user_id=current_id) time.sleep(3) return False all_data = json.loads( re.search( "window._sharedData = (.*?);</script>", r.text, re.DOTALL ).group(1) )["entry_data"]["ProfilePage"][0] user_info = all_data["graphql"]["user"] i = 0 log_string = "Checking user info.." 
self.write_log(log_string) follows = user_info["edge_follow"]["count"] follower = user_info["edge_followed_by"]["count"] media = user_info["edge_owner_to_timeline_media"]["count"] follow_viewer = user_info["follows_viewer"] followed_by_viewer = user_info["followed_by_viewer"] requested_by_viewer = user_info["requested_by_viewer"] has_requested_viewer = user_info["has_requested_viewer"] log_string = f"Follower : {follower}" self.write_log(log_string) log_string = f"Following : {follows}" self.write_log(log_string) log_string = f"Media : {media}" self.write_log(log_string) if follows == 0 or follower / follows > 2: self.is_selebgram = True self.is_fake_account = False self.write_log(" >>>This is probably Selebgram account") elif follower == 0 or follows / follower > 2: self.is_fake_account = True self.is_selebgram = False self.write_log(" >>>This is probably Fake account") else: self.is_selebgram = False self.is_fake_account = False self.write_log(" >>>This is a normal account") if media > 0 and follows / media < 25 and follower / media < 25: self.is_active_user = True self.write_log(" >>>This user is active") else: self.is_active_user = False self.write_log(" >>>This user is passive") if follow_viewer or has_requested_viewer: self.is_follower = True self.write_log(" >>>This account is following you") else: self.is_follower = False self.write_log(" >>>This account is NOT following you") if followed_by_viewer or requested_by_viewer: self.is_following = True self.write_log(" >>>You are following this account") else: self.is_following = False self.write_log(" >>>You are NOT following this account") except: logging.exception("Except on auto_unfollow!") time.sleep(3) return False else: return False if ( self.is_selebgram is not False or self.is_fake_account is not False or self.is_active_user is not True or self.is_follower is not True ): self.write_log(current_user) self.unfollow(current_id) self.next_iteration["Unfollow"] = time.time() + self.add_time( 
self.unfollow_delay ) # don't insert unfollow count as it is done now inside unfollow() # insert_unfollow_count(self, user_id=current_id) elif self.is_following is not True: # we are not following this account, hence we unfollowed it, let's keep track insert_unfollow_count(self, user_id=current_id) time.sleep(8) def get_media_id_recent_feed(self): if self.login_status: now_time = datetime.datetime.now() log_string = f"{self.user_login} : Get media id on recent feed" self.write_log(log_string) if self.login_status == 1: url_tag = "https://www.instagram.com/" try: r = self.s.get(url_tag) jsondata = re.search( "additionalDataLoaded\('feed',({.*})\);", r.text ).group(1) all_data = json.loads(jsondata.strip()) self.media_on_feed = list( all_data["user"]["edge_web_feed_timeline"]["edges"] ) log_string = f"Media in recent feed = {len(self.media_on_feed)}" self.write_log(log_string) except: logging.exception("get_media_id_recent_feed") self.media_on_feed = [] time.sleep(20) return 0 else: return 0 def write_log(self, log_text): """ Write log by print() or logger """ if self.log_mod == 0: try: now_time = datetime.datetime.now() print(f"{now_time.strftime('%d.%m.%Y_%H:%M')} {log_text}") except UnicodeEncodeError: print("Your text has unicode problem!") elif self.log_mod == 1: # Create log_file if not exist. if self.log_file == 0: self.log_file = 1 now_time = datetime.datetime.now() self.log_full_path = "%s%s_%s.log" % ( self.log_file_path, self.user_login, now_time.strftime("%d.%m.%Y_%H:%M"), ) formatter = logging.Formatter("%(asctime)s - %(name)s " "- %(message)s") self.logger = logging.getLogger(self.user_login) self.hdrl = logging.FileHandler(self.log_full_path, mode="w") self.hdrl.setFormatter(formatter) self.logger.setLevel(level=logging.INFO) self.logger.addHandler(self.hdrl) # Log to log file. try: self.logger.info(log_text) except UnicodeEncodeError: print("Your text has unicode problem!")
import json
import logging
import os
import re
import subprocess
import tempfile
from datetime import datetime
from pathlib import Path

import requests
from fastapi import FastAPI
from fastapi_utils.tasks import repeat_every
from grayskull.__main__ import create_python_recipe
from grayskull.cli import CLIConfig
from grayskull.strategy.py_base import download_sdist_pkg
from souschef.recipe import Recipe

app = FastAPI()
# GitHub personal-access token used for all authenticated API calls.
GH_TOKEN = os.getenv("GH_TOKEN")
# Poll GitHub notifications every 4 minutes.
CHECK_NOTIFICATIONS_INTERVAL = 60 * 4


def send_comment(issue_url: str, msg: str):
    """Post ``msg`` as a comment on the GitHub issue at ``issue_url``."""
    response = requests.post(
        f"{issue_url}/comments",
        headers={"Authorization": f"token {GH_TOKEN}"},
        data=json.dumps({"body": msg}),
    )
    response.raise_for_status()


def get_issue_info(issue_url):
    """Return the GitHub issue payload (JSON dict) for ``issue_url``."""
    issue_info = requests.get(issue_url)
    issue_info.raise_for_status()
    return issue_info.json()


def get_pr_info(pr_url):
    """Return the GitHub pull-request payload (JSON dict) for ``pr_url``."""
    pr_info = requests.get(pr_url)
    pr_info.raise_for_status()
    return pr_info.json()


def convert_to_str(ingredient):
    """Render a recipe dependency as ``name constraint # [selector]``."""
    str_dep = f"{ingredient.package_name}"
    if ingredient.constrains:
        str_dep += f" {ingredient.constrains}"
    if ingredient.selector:
        str_dep += f" # [{ingredient.selector}]"
    return str_dep


def get_table_deps(current_recipe, gs_recipe, req_section):
    """Build a markdown table comparing the ``req_section`` requirements.

    Compares the dependencies currently in ``current_recipe`` against the
    ones Grayskull generated (``gs_recipe``), marking exact matches,
    constraint/selector mismatches, dependencies only in the current
    recipe, and dependencies only Grayskull found.
    """
    table = f"================ **{req_section.upper()}** ================"
    table += f"\nRequirements for **{req_section}**\n"
    table += "| Current Deps | Grayskull found | |\n"
    table += "|--------------|-----------------|--|\n"
    for current_dep in current_recipe["requirements"][req_section]:
        for gs_dep in gs_recipe["requirements"][req_section] or []:
            if gs_dep.package_name == current_dep.package_name:
                if (
                    gs_dep.constrains == current_dep.constrains
                    and gs_dep.selector == current_dep.selector
                ):
                    str_dep = convert_to_str(gs_dep)
                    table += f"| {str_dep} | {str_dep} | :heavy_check_mark: |\n"
                else:
                    table += (
                        f"| {convert_to_str(current_dep)} |"
                        f" {convert_to_str(gs_dep)} | :heavy_exclamation_mark: |\n"
                    )
                break
        else:
            # No Grayskull counterpart with the same name.
            str_dep = convert_to_str(current_dep)
            table += f"| {str_dep} | | :x: |\n"
    for gs_dep in gs_recipe["requirements"][req_section] or []:
        # NOTE(review): compares a package-name string against the recipe's
        # ingredient objects — presumably souschef ingredients compare equal
        # to their name string; confirm against souschef's API.
        if gs_dep.package_name not in (
            current_recipe["requirements"][req_section] or []
        ):
            table += f"| | {convert_to_str(gs_dep)} | :heavy_plus_sign: |\n"
    return table


def get_gs_message_show_requirements(recipe: Recipe, gs_recipe: Recipe) -> str:
    """Concatenate comparison tables for whichever sections the recipe has."""
    msg = ""
    if "build" in recipe["requirements"]:
        msg += get_table_deps(recipe, gs_recipe, "build")
        msg += "\n\n"
    if "host" in recipe["requirements"]:
        msg += get_table_deps(recipe, gs_recipe, "host")
        msg += "\n\n"
    if "run" in recipe["requirements"]:
        msg += get_table_deps(recipe, gs_recipe, "run")
    return msg


def _extract_send_requirements(pr_json, folder, render_cb, response_msg):
    """Clone the PR branch, regenerate its recipe with Grayskull, and post
    a requirements-comparison comment back on the issue.

    Raises ValueError when the clone contains no recipe/meta.yaml (or .yml).
    """
    subprocess.run(
        [
            "git",
            "clone",
            pr_json["head"]["repo"]["git_url"],
            folder,
            "--branch",
            pr_json["head"]["ref"],
        ],
        check=True,
    )
    recipe_path = Path(folder) / "recipe" / "meta.yaml"
    if not recipe_path.is_file():
        recipe_path = Path(folder) / "recipe" / "meta.yml"
    if not recipe_path.is_file():
        # FIX: the original nested double quotes inside a double-quoted
        # f-string, a SyntaxError on Python < 3.12 (PEP 701).
        raise ValueError(
            "There is no recipe file in recipe folder (meta.yaml or meta.yml)"
            f" - {pr_json['head']['repo']['git_url']}"
        )
    recipe = Recipe(load_file=recipe_path, show_comments=False)
    rendered_recipe = render_cb([str(Path(folder) / "recipe")], print_results=False)
    pkg_url = rendered_recipe[0][0].meta["source"]["url"]
    pkg_file_name = pkg_url.strip().split("/")[-1]
    with tempfile.TemporaryDirectory() as sdist_folder:
        CLIConfig(stdout=False)  # silence grayskull's console output
        download_sdist_pkg(pkg_url, Path(sdist_folder) / pkg_file_name)
        sdist_file = Path(sdist_folder) / pkg_file_name
        gs_recipe = create_python_recipe(
            str(sdist_file),
            is_strict_cf=True,
            from_local_sdist=True,
        )[0]
    send_comment(
        response_msg["issue_url"],
        get_gs_message_show_requirements(recipe, gs_recipe),
    )


def show_requirements(response_msg: dict):
    """Handle the ``show requirements`` command for a PR mention."""
    # Imported lazily: conda_build is heavy and only needed for this command.
    from conda_build.cli.main_render import execute as render_cb

    issue_json = get_issue_info(response_msg["issue_url"])
    pr_json = get_pr_info(issue_json["pull_request"]["url"])
    with tempfile.TemporaryDirectory() as folder:
        _extract_send_requirements(pr_json, folder, render_cb, response_msg)


def run_command_msg(response_msg):
    """Dispatch the bot command found in a comment body, if any.

    Replies with a "not recognized" comment when the bot is mentioned
    without a known command.
    """
    msg = response_msg["body"]
    all_cmds = {
        show_requirements: re.compile(
            r"@conda\-grayskull\s+show\s+requirement[s]*", re.IGNORECASE
        )
    }
    for run_function, re_match in all_cmds.items():
        logging.info(f"Message received: {msg}")
        if re_match.search(msg):
            send_comment(response_msg["issue_url"], "Working on your request...")
            run_function(response_msg)
            break
    else:
        if msg and "@conda-grayskull" in msg:
            send_comment(
                response_msg["issue_url"],
                "Command not recognized, please inform a valid command.",
            )


@app.on_event("startup")
@repeat_every(seconds=CHECK_NOTIFICATIONS_INTERVAL)
def check_notifications():
    """Poll unread GitHub mention notifications, run their commands, and
    mark everything up to the newest mention as read."""
    logging.info("Checking notifications...")
    response = requests.get(
        "https://api.github.com/notifications",
        params={"reason": "mention", "unread": True},
        headers={"Authorization": f"token {GH_TOKEN}"},
    )
    response.raise_for_status()
    all_mentions = response.json()
    last_update = None
    for mention in all_mentions:
        mention_updated_at = mention["updated_at"]
        # GitHub timestamps end in 'Z'; datetime.fromisoformat on older
        # Pythons does not accept it, so strip it first.
        if mention_updated_at.endswith("Z"):
            mention_updated_at = mention_updated_at[:-1]
        mention_updated_at = datetime.fromisoformat(mention_updated_at)
        if last_update:
            last_update = max(mention_updated_at, last_update)
        else:
            last_update = mention_updated_at
        # FIX: the original reused double quotes inside a double-quoted
        # f-string, a SyntaxError on Python < 3.12 (PEP 701).
        logging.info(f"Comment url: {mention['subject']['latest_comment_url']}")
        response = requests.get(mention["subject"]["latest_comment_url"])
        response.raise_for_status()
        msg = response.json()
        run_command_msg(msg)
    requests.put(
        "https://api.github.com/notifications",
        headers={"Authorization": f"token {GH_TOKEN}"},
        params={"last_read_at": last_update, "read": True},
    )
import json
import logging
import os
import re
import subprocess
import tempfile
from datetime import datetime
from pathlib import Path

import requests
from fastapi import FastAPI
from fastapi_utils.tasks import repeat_every
from grayskull.__main__ import create_python_recipe
from grayskull.cli import CLIConfig
from grayskull.strategy.py_base import download_sdist_pkg
from souschef.recipe import Recipe

app = FastAPI()
# GitHub personal-access token used for all authenticated API calls.
GH_TOKEN = os.getenv("GH_TOKEN")
# Poll GitHub notifications every 4 minutes.
CHECK_NOTIFICATIONS_INTERVAL = 60 * 4


def send_comment(issue_url: str, msg: str):
    """Post ``msg`` as a comment on the GitHub issue at ``issue_url``."""
    response = requests.post(
        f"{issue_url}/comments",
        headers={"Authorization": f"token {GH_TOKEN}"},
        data=json.dumps({"body": msg}),
    )
    response.raise_for_status()


def get_issue_info(issue_url):
    """Return the GitHub issue payload (JSON dict) for ``issue_url``."""
    issue_info = requests.get(issue_url)
    issue_info.raise_for_status()
    return issue_info.json()


def get_pr_info(pr_url):
    """Return the GitHub pull-request payload (JSON dict) for ``pr_url``."""
    pr_info = requests.get(pr_url)
    pr_info.raise_for_status()
    return pr_info.json()


def convert_to_str(ingredient):
    """Render a recipe dependency as ``name constraint # [selector]``."""
    str_dep = f"{ingredient.package_name}"
    if ingredient.constrains:
        str_dep += f" {ingredient.constrains}"
    if ingredient.selector:
        str_dep += f" # [{ingredient.selector}]"
    return str_dep


def get_table_deps(current_recipe, gs_recipe, req_section):
    """Build a markdown table comparing the ``req_section`` requirements
    of ``current_recipe`` against the ones Grayskull generated, marking
    matches, mismatches, removals and additions with emoji."""
    table = f"================ **{req_section.upper()}** ================"
    table += f"\nRequirements for **{req_section}**\n"
    table += "| Current Deps | Grayskull found | |\n"
    table += "|--------------|-----------------|--|\n"
    for current_dep in current_recipe["requirements"][req_section]:
        for gs_dep in gs_recipe["requirements"][req_section] or []:
            if gs_dep.package_name == current_dep.package_name:
                if (
                    gs_dep.constrains == current_dep.constrains
                    and gs_dep.selector == current_dep.selector
                ):
                    str_dep = convert_to_str(gs_dep)
                    table += f"| {str_dep} | {str_dep} | :heavy_check_mark: |\n"
                else:
                    table += (
                        f"| {convert_to_str(current_dep)} |"
                        f" {convert_to_str(gs_dep)} | :heavy_exclamation_mark: |\n"
                    )
                break
        else:
            # No Grayskull counterpart with the same name.
            str_dep = convert_to_str(current_dep)
            table += f"| {str_dep} | | :x: |\n"
    for gs_dep in gs_recipe["requirements"][req_section] or []:
        # NOTE(review): compares a package-name string against ingredient
        # objects — presumably souschef ingredients compare equal to their
        # name string; confirm against souschef's API.
        if gs_dep.package_name not in (
            current_recipe["requirements"][req_section] or []
        ):
            table += f"| | {convert_to_str(gs_dep)} | :heavy_plus_sign: |\n"
    return table


def get_gs_message_show_requirements(recipe: Recipe, gs_recipe: Recipe) -> str:
    """Concatenate comparison tables for whichever sections the recipe has."""
    msg = ""
    if "build" in recipe["requirements"]:
        msg += get_table_deps(recipe, gs_recipe, "build")
        msg += "\n\n"
    if "host" in recipe["requirements"]:
        msg += get_table_deps(recipe, gs_recipe, "host")
        msg += "\n\n"
    if "run" in recipe["requirements"]:
        msg += get_table_deps(recipe, gs_recipe, "run")
    return msg


def _extract_send_requirements(pr_json, folder, render_cb, response_msg):
    """Clone the PR branch, regenerate its recipe with Grayskull, and post
    a requirements-comparison comment back on the issue.

    Raises ValueError when the clone contains no recipe/meta.yaml (or .yml).
    """
    subprocess.run(
        [
            "git",
            "clone",
            pr_json["head"]["repo"]["git_url"],
            folder,
            "--branch",
            pr_json["head"]["ref"],
        ],
        check=True,
    )
    recipe_path = Path(folder) / "recipe" / "meta.yaml"
    if not recipe_path.is_file():
        recipe_path = Path(folder) / "recipe" / "meta.yml"
    if not recipe_path.is_file():
        raise ValueError(
            "There is no recipe file in recipe folder (meta.yaml or meta.yml)"
            f" - {pr_json['head']['repo']['git_url']}"
        )
    recipe = Recipe(load_file=recipe_path, show_comments=False)
    rendered_recipe = render_cb([str(Path(folder) / "recipe")], print_results=False)
    pkg_url = rendered_recipe[0][0].meta["source"]["url"]
    pkg_file_name = pkg_url.strip().split("/")[-1]
    with tempfile.TemporaryDirectory() as sdist_folder:
        CLIConfig(stdout=False)  # silence grayskull's console output
        download_sdist_pkg(pkg_url, Path(sdist_folder) / pkg_file_name)
        sdist_file = Path(sdist_folder) / pkg_file_name
        gs_recipe = create_python_recipe(
            str(sdist_file),
            is_strict_cf=True,
            from_local_sdist=True,
        )[0]
    send_comment(
        response_msg["issue_url"],
        get_gs_message_show_requirements(recipe, gs_recipe),
    )


def show_requirements(response_msg: dict):
    """Handle the ``show requirements`` command for a PR mention."""
    # Imported lazily: conda_build is heavy and only needed for this command.
    from conda_build.cli.main_render import execute as render_cb

    issue_json = get_issue_info(response_msg["issue_url"])
    pr_json = get_pr_info(issue_json["pull_request"]["url"])
    with tempfile.TemporaryDirectory() as folder:
        _extract_send_requirements(pr_json, folder, render_cb, response_msg)


def run_command_msg(response_msg):
    """Dispatch the bot command found in a comment body, if any; reply with
    a "not recognized" comment when mentioned without a known command."""
    msg = response_msg["body"]
    all_cmds = {
        show_requirements: re.compile(
            r"@conda\-grayskull\s+show\s+requirement[s]*", re.IGNORECASE
        )
    }
    for run_function, re_match in all_cmds.items():
        logging.info(f"Message received: {msg}")
        if re_match.search(msg):
            send_comment(response_msg["issue_url"], "Working on your request...")
            run_function(response_msg)
            break
    else:
        if msg and "@conda-grayskull" in msg:
            send_comment(
                response_msg["issue_url"],
                "Command not recognized, please inform a valid command.",
            )


@app.on_event("startup")
@repeat_every(seconds=CHECK_NOTIFICATIONS_INTERVAL)
def check_notifications():
    """Poll unread GitHub mention notifications, run their commands, and
    mark everything up to the newest mention as read."""
    logging.info("Checking notifications...")
    response = requests.get(
        "https://api.github.com/notifications",
        params={"reason": "mention", "unread": True},
        headers={"Authorization": f"token {GH_TOKEN}"},
    )
    response.raise_for_status()
    all_mentions = response.json()
    last_update = None
    for mention in all_mentions:
        mention_updated_at = mention["updated_at"]
        # Strip the trailing 'Z' so datetime.fromisoformat accepts it.
        if mention_updated_at.endswith("Z"):
            mention_updated_at = mention_updated_at[:-1]
        mention_updated_at = datetime.fromisoformat(mention_updated_at)
        if last_update:
            last_update = max(mention_updated_at, last_update)
        else:
            last_update = mention_updated_at
        logging.info(f"Comment url: {mention['subject']['latest_comment_url']}")
        response = requests.get(mention["subject"]["latest_comment_url"])
        response.raise_for_status()
        msg = response.json()
        run_command_msg(msg)
    requests.put(
        "https://api.github.com/notifications",
        headers={"Authorization": f"token {GH_TOKEN}"},
        params={"last_read_at": last_update, "read": True},
    )
# Copyright (c) 2018-2019 Patricio Cubillos and contributors.
# bibmanager is open-source software under the MIT license (see LICENSE).

__all__ = [
    'manager',
    'search',
    'display',
    'add_bibtex',
    'update',
    'key_update',
]

import os
import re
import json
# FIX: was 'import urllib', which only worked because requests imports
# urllib.parse transitively; import the submodule explicitly.
import urllib.parse
import textwrap
import pickle

import requests

from .. import bib_manager as bm
from .. import config_manager as cm
from .. import utils as u


def manager(querry=None):
    """
    A manager, it doesn't really do anything, it just delegates.
    """
    rows = int(cm.get('ads_display'))
    if querry is None and not os.path.exists(u.BM_CACHE):
        print("There are no more entries for this querry.")
        return
    if querry is None:
        # Resume a cached querry and extend it if the next page runs past
        # the cached results.
        with open(u.BM_CACHE, 'rb') as handle:
            results, querry, start, index, nmatch = pickle.load(handle)
        last = start + len(results)
        if last < nmatch and index + rows > last:
            new_results, nmatch = search(querry, start=last)
            results = results[index-start:] + new_results
            start = index
            last = start + len(results)
    else:
        start = 0
        index = start
        results, nmatch = search(querry, start=start)
    display(results, start, index, rows, nmatch)
    index += rows
    if index >= nmatch:
        # Exhausted the matches; drop the cache.
        with u.ignored(OSError):
            os.remove(u.BM_CACHE)
    else:
        with open(u.BM_CACHE, 'wb') as handle:
            pickle.dump([results, querry, start, index, nmatch], handle,
                        protocol=pickle.HIGHEST_PROTOCOL)


def search(querry, start=0, cache_rows=200, sort='pubdate+desc'):
    """
    Make a querry from ADS.

    Parameters
    ----------
    querry: String
        A querry string like an entry in the new ADS interface:
        https://ui.adsabs.harvard.edu/
    start: Integer
        Starting index of entry to return.
    cache_rows: Integer
        Maximum number of entries to return.
    sort: String
        Sorting field and direction to use.

    Returns
    -------
    results: List of dicts
        Querry outputs between indices start and start+rows.
    nmatch: Integer
        Total number of entries matched by the querry.

    Resources
    ---------
    A comprehensive description of the querry format:
    - http://adsabs.github.io/help/search/
    Description of the querry parameters:
    - https://github.com/adsabs/adsabs-dev-api/blob/master/Search_API.ipynb

    Examples
    --------
    >>> import bibmanager.ads_manager as am
    >>> # Search entries by author (note the need for double quotes,
    >>> # otherwise, the search might produce bogus results):
    >>> querry = 'author:"cubillos, p"'
    >>> results, nmatch = am.search(querry)
    >>> # Search entries by first author:
    >>> querry = 'author:"^cubillos, p"'
    >>> # Combine search by first author and year:
    >>> querry = 'author:"^cubillos, p" year:2017'
    >>> # Restrict seach to article-type entries:
    >>> querry = 'author:"^cubillos, p" property:article'
    >>> # Restrict seach to peer-reviewed articles:
    >>> querry = 'author:"^cubillos, p" property:refereed'

    >>> # Attempt with invalid token:
    >>> results, nmatch = am.search(querry)
    ValueError: Invalid ADS request: Unauthorized, check you have a valid ADS token.
    >>> # Attempt with invalid querry ('properties' instead of 'property'):
    >>> results, nmatch = am.search('author:"^cubillos, p" properties:refereed')
    ValueError: Invalid ADS request:
    org.apache.solr.search.SyntaxError: org.apache.solr.common.SolrException: undefined field properties
    """
    token = cm.get('ads_token')
    querry = urllib.parse.quote(querry)
    r = requests.get('https://api.adsabs.harvard.edu/v1/search/query?'
                     f'q={querry}&start={start}&rows={cache_rows}'
                     f'&sort={sort}&fl=title,author,year,bibcode,pub',
                     headers={'Authorization': f'Bearer {token}'})
    resp = r.json()
    if 'error' in resp:
        # FIX: the original nested double quotes inside double-quoted
        # f-strings, a SyntaxError on Python < 3.12 (PEP 701).
        if resp['error'] == 'Unauthorized':
            raise ValueError(f"Invalid ADS request: {resp['error']}, "
                             "check you have a valid ADS token.")
        raise ValueError(f"Invalid ADS request:\n{resp['error']['msg']}.")
    nmatch = resp['response']['numFound']
    results = resp['response']['docs']
    return results, nmatch


def display(results, start, index, rows, nmatch, short=True):
    """
    Show on the prompt a list of entries from an ADS search.

    Parameters
    ----------
    results: List of dicts
        Subset of entries returned by a querry.
    start: Integer
        Index assigned to first entry in results.
    index: Integer
        First index to display.
    rows: Integer
        Number of entries to display.
    nmatch: Integer
        Total number of entries corresponding to querry (not necessarily
        the number of entries in results).
    short: Bool
        Format for author list. If True, truncate with 'et al' after
        the second author.

    Examples
    --------
    >>> import bibmanager.ads_manager as am
    >>> start = index = 0
    >>> querry = 'author:"^cubillos, p" property:refereed'
    >>> results, nmatch = am.search(querry, start=start)
    >>> display(results, start, index, rows, nmatch)
    """
    # FIX: replaced double quotes inside double-quoted f-strings with
    # single quotes (SyntaxError on Python < 3.12, PEP 701).
    for result in results[index-start:index-start+rows]:
        title = textwrap.fill(f"Title: {result['title'][0]}", width=78,
                              subsequent_indent=' ')
        author_list = [u.parse_name(author) for author in result['author']]
        authors = textwrap.fill(
            f"Authors: {u.get_authors(author_list, short)}",
            width=78, subsequent_indent=' ')
        adsurl = ("adsurl: https://ui.adsabs.harvard.edu/abs/"
                  + f"{result['bibcode']}")
        bibcode = f"\n{u.BOLD}bibcode{u.END}: {result['bibcode']}"
        print(f"\n{title}\n{authors}\n{adsurl}{bibcode}")
    if index + rows < nmatch:
        more = " To show the next set, execute:\nbibm ads-search -n"
    else:
        more = ""
    print(f"\nShowing entries {index+1}--{min(index+rows, nmatch)} out of "
          f"{nmatch} matches.{more}")


def add_bibtex(input_bibcodes, input_keys, eprints=None, dois=None,
               update_keys=True, base=None):
    """
    Add bibtex entries from a list of ADS bibcodes, with specified keys.
    New entries will replace old ones without asking if they are
    duplicates.

    Parameters
    ----------
    input_bibcodes: List of strings
        A list of ADS bibcodes.
    input_keys: List of strings
        BibTeX keys to assign to each bibcode.
    eprints: List of strings
        List of ArXiv IDs corresponding to the input bibcodes.
        Defaults to an empty list.
    dois: List of strings
        List of DOIs corresponding to the input bibcodes.
        Defaults to an empty list.
    update_keys: Bool
        If True, attempt to update keys of entries that were updated
        from arxiv to published versions.
    base: List of Bib() objects
        If None, merge new entries into the bibmanager database.
        If not None, merge new entries into base.

    Returns
    -------
    bibs: List of Bib() objects
        Updated list of BibTeX entries.

    Examples
    --------
    >>> import bibmanager.ads_manager as am
    >>> # A successful add call:
    >>> bibcodes = ['1925PhDT.........1P']
    >>> keys = ['Payne1925phdStellarAtmospheres']
    >>> am.add_bibtex(bibcodes, keys)
    >>> # A failing add call:
    >>> bibcodes = ['1925PhDT....X....1P']
    >>> am.add_bibtex(bibcodes, keys)
    Error: There were no entries found for the input bibcodes.

    >>> # A successful add call with multiple entries:
    >>> bibcodes = ['1925PhDT.........1P', '2018MNRAS.481.5286F']
    >>> keys = ['Payne1925phdStellarAtmospheres', 'FolsomEtal2018mnrasHD219134']
    >>> am.add_bibtex(bibcodes, keys)
    >>> # A partially failing call will still add those that succeed:
    >>> bibcodes = ['1925PhDT.....X...1P', '2018MNRAS.481.5286F']
    >>> am.add_bibtex(bibcodes, keys)
    Warning: bibcode '1925PhDT.....X...1P' not found.
    """
    # FIX: mutable default arguments replaced with None sentinels.
    eprints = [] if eprints is None else eprints
    dois = [] if dois is None else dois
    token = cm.get('ads_token')
    # Keep the originals untouched (copies will be modified):
    bibcodes, keys = input_bibcodes.copy(), input_keys.copy()
    # Make request:
    r = requests.post("https://api.adsabs.harvard.edu/v1/export/bibtex",
                      headers={"Authorization": f'Bearer {token}',
                               "Content-type": "application/json"},
                      data=json.dumps({"bibcode": bibcodes}))
    resp = r.json()
    # No valid outputs:
    if 'error' in resp:
        if resp['error'] == 'Unauthorized':
            print('\nError: Unauthorized access to ADS. Check that the ADS '
                  'token is valid.')
        elif resp['error'] == 'no result from solr':
            print("\nError: There were no entries found for the input bibcodes.")
        else:
            # FIX: quote reuse inside the f-string (SyntaxError < 3.12).
            print("\nError: ADS request returned an error message:"
                  f"\n{resp['error']}")
        return

    # Keep counts of things:
    nfound = int(resp['msg'].split()[1])
    nreqs = len(bibcodes)
    # Split output into separate BibTeX entries (keep as strings):
    results = resp["export"].strip().split("\n\n")

    new_keys = []
    new_bibs = []
    founds = [False for _ in bibcodes]
    arxiv_updates = 0
    # Match results to bibcodes,keys.
    # FIX: iterate over a snapshot since matched entries are removed from
    # `results` inside the loop (mutating the iterated list is fragile).
    for result in list(reversed(results)):
        ibib = None
        new = bm.Bib(result)
        rkey = new.key
        doi = new.doi
        eprint = new.eprint
        # Output bibcode is input bibcode:
        if rkey in bibcodes:
            ibib = bibcodes.index(rkey)
            new_key = keys[ibib]
        # Else, check for bibcode updates in remaining bibcodes:
        elif eprint is not None and eprint in eprints:
            ibib = eprints.index(eprint)
        elif doi is not None and doi in dois:
            ibib = dois.index(doi)
        if ibib is not None:
            new_key = keys[ibib]
            updated_key = key_update(new_key, rkey, bibcodes[ibib])
            if update_keys and updated_key.lower() != new_key.lower():
                new_key = updated_key
                new_keys.append([keys[ibib], new_key])
            if 'arXiv' in bibcodes[ibib] and 'arXiv' not in new.bibcode:
                arxiv_updates += 1
            new.update_key(new_key)
            new_bibs.append(new)
            founds[ibib] = True
            results.remove(result)

    # Warnings:
    if nfound < nreqs or len(results) > 0:
        warning = u.BANNER + "Warning:\n"
        # bibcodes not found
        missing = [bibcode
                   for bibcode, found in zip(bibcodes, founds)
                   if not found]
        if nfound < nreqs:
            warning += '\nThere were bibcodes unmatched or not found in ADS:\n - '
            warning += '\n - '.join(missing) + "\n"
        # bibcodes not matched:
        if len(results) > 0:
            warning += '\nThese ADS results did not match input bibcodes:\n\n'
            warning += '\n\n'.join(results) + "\n"
        warning += u.BANNER
        print(warning)

    # Add to bibmanager database or base:
    updated = bm.merge(new=new_bibs, take='new', base=base)
    print('(Not counting updated references)')

    # Report arXiv updates:
    if arxiv_updates > 0:
        print(f"\nThere were {arxiv_updates} entries updated from ArXiv to "
              "their peer-reviewed version.")
    if len(new_keys) > 0:
        new_keys = [f" {old} -> {new}" for old, new in new_keys if old != new]
        if len(new_keys) > 0:
            print("These entries changed their key:\n" + "\n".join(new_keys))
    return updated


def update(update_keys=True, base=None):
    """
    Do an ADS querry by bibcode for all entries that have an ADS bibcode.
    Replacing old entries with the new ones.  The main use of this
    function is to update arxiv version of articles.

    Parameters
    ----------
    update_keys: Bool
        If True, attempt to update keys of entries that were updated
        from arxiv to published versions.
    base: List of Bib() objects
        If None, update the bibmanager database; else, update base.
    """
    if base is None:
        bibs = bm.load()
    else:
        bibs = base
    keys = [bib.key for bib in bibs if bib.bibcode is not None]
    bibcodes = [bib.bibcode for bib in bibs if bib.bibcode is not None]
    eprints = [bib.eprint for bib in bibs if bib.bibcode is not None]
    dois = [bib.doi for bib in bibs if bib.bibcode is not None]
    # Querry-replace:
    bibs = add_bibtex(bibcodes, keys, eprints, dois, update_keys, base=base)
    return bibs


def key_update(key, bibcode, alternate_bibcode):
    r"""
    Update key with year and journal of arxiv version of a key.

    This function will search and update the year in a key,
    and the journal if the key contains the word 'arxiv' (case
    insensitive).  The function extracts the info from the old and
    new bibcodes.  ADS bibcode format:
    http://adsabs.github.io/help/actions/bibcode

    Examples
    --------
    >>> import bibmanager.ads_manager as am
    >>> key = 'BeaulieuEtal2010arxivGJ436b'
    >>> bibcode           = '2011ApJ...731...16B'
    >>> alternate_bibcode = '2010arXiv1007.0324B'
    >>> new_key = am.key_update(key, bibcode, alternate_bibcode)
    >>> print(f'{key}\n{new_key}')
    BeaulieuEtal2010arxivGJ436b
    BeaulieuEtal2011apjGJ436b

    >>> key = 'CubillosEtal2018arXivRetrievals'
    >>> bibcode           = '2019A&A...550A.100B'
    >>> alternate_bibcode = '2018arXiv123401234B'
    >>> new_key = am.key_update(key, bibcode, alternate_bibcode)
    >>> print(f'{key}\n{new_key}')
    CubillosEtal2018arXivRetrievals
    CubillosEtal2019aaRetrievals
    """
    old_year = alternate_bibcode[0:4]
    year = bibcode[0:4]
    # Update year (first occurrence only):
    if old_year != year and old_year in key:
        key = key.replace(old_year, year, 1)

    # Update journal: bibcode characters 4-9 hold the publication code.
    journal = bibcode[4:9].replace('.', '').replace('&', '').lower()
    # Search for the word 'arxiv' in key:
    ijournal = key.lower().find('arxiv')
    if ijournal >= 0:
        key = "".join([key[:ijournal], journal, key[ijournal+5:]])

    return key
# Copyright (c) 2018-2019 Patricio Cubillos and contributors.
# bibmanager is open-source software under the MIT license (see LICENSE).

__all__ = [
    'manager',
    'search',
    'display',
    'add_bibtex',
    'update',
    'key_update',
]

import os
import re
import json
import urllib
import textwrap
import pickle

import requests

from .. import bib_manager as bm
from .. import config_manager as cm
from .. import utils as u


def manager(querry=None):
    """
    A manager, it doesn't really do anything, it just delegates.

    If querry is None, display the next page of results cached from
    the previous search (fetching a new batch from ADS when the cache
    runs out); otherwise, run a fresh search and cache its state.

    Parameters
    ----------
    querry: String
        A querry string for the ADS search API, or None to resume the
        cached querry.
    """
    rows = int(cm.get('ads_display'))
    # Nothing cached and nothing requested:
    if querry is None and not os.path.exists(u.BM_CACHE):
        print("There are no more entries for this querry.")
        return

    if querry is None:
        # Resume paging from the cached search state:
        with open(u.BM_CACHE, 'rb') as handle:
            results, querry, start, index, nmatch = pickle.load(handle)
        last = start + len(results)
        # Fetch the next batch when the cached window cannot fill a page:
        if last < nmatch and index + rows > last:
            new_results, nmatch = search(querry, start=last)
            results = results[index-start:] + new_results
            start = index
            last = start + len(results)
    else:
        start = 0
        index = start
        results, nmatch = search(querry, start=start)

    display(results, start, index, rows, nmatch)
    index += rows
    if index >= nmatch:
        # Search exhausted, drop the cache:
        with u.ignored(OSError):
            os.remove(u.BM_CACHE)
    else:
        # Persist paging state for the next 'bibm ads-search -n' call:
        with open(u.BM_CACHE, 'wb') as handle:
            pickle.dump([results, querry, start, index, nmatch], handle,
                protocol=pickle.HIGHEST_PROTOCOL)


def search(querry, start=0, cache_rows=200, sort='pubdate+desc'):
    """
    Make a querry from ADS.

    Parameters
    ----------
    querry: String
        A querry string like an entry in the new ADS interface:
        https://ui.adsabs.harvard.edu/
    start: Integer
        Starting index of entry to return.
    cache_rows: Integer
        Maximum number of entries to return.
    sort: String
        Sorting field and direction to use.

    Returns
    -------
    results: List of dicts
        Querry outputs between indices start and start+rows.
    nmatch: Integer
        Total number of entries matched by the querry.

    Resources
    ---------
    A comprehensive description of the querry format:
    - http://adsabs.github.io/help/search/
    Description of the querry parameters:
    - https://github.com/adsabs/adsabs-dev-api/blob/master/Search_API.ipynb

    Examples
    --------
    >>> import bibmanager.ads_manager as am
    >>> # Search entries by author (note the need for double quotes,
    >>> # otherwise, the search might produce bogus results):
    >>> querry = 'author:"cubillos, p"'
    >>> results, nmatch = am.search(querry)
    >>> # Search entries by first author:
    >>> querry = 'author:"^cubillos, p"'
    >>> # Combine search by first author and year:
    >>> querry = 'author:"^cubillos, p" year:2017'
    >>> # Restrict seach to article-type entries:
    >>> querry = 'author:"^cubillos, p" property:article'
    >>> # Restrict seach to peer-reviewed articles:
    >>> querry = 'author:"^cubillos, p" property:refereed'

    >>> # Attempt with invalid token:
    >>> results, nmatch = am.search(querry)
    ValueError: Invalid ADS request: Unauthorized, check you have a valid ADS token.
    >>> # Attempt with invalid querry ('properties' instead of 'property'):
    >>> results, nmatch = am.search('author:"^cubillos, p" properties:refereed')
    ValueError: Invalid ADS request:
    org.apache.solr.search.SyntaxError: org.apache.solr.common.SolrException: undefined field properties
    """
    token = cm.get('ads_token')
    querry = urllib.parse.quote(querry)
    r = requests.get('https://api.adsabs.harvard.edu/v1/search/query?'
                     f'q={querry}&start={start}&rows={cache_rows}'
                     f'&sort={sort}&fl=title,author,year,bibcode,pub',
                     headers={'Authorization': f'Bearer {token}'})
    resp = r.json()
    if 'error' in resp:
        if resp['error'] == 'Unauthorized':
            raise ValueError(f"Invalid ADS request: {resp['error']}, "
                             "check you have a valid ADS token.")
        raise ValueError(f"Invalid ADS request:\n{resp['error']['msg']}.")

    nmatch = resp['response']['numFound']
    results = resp['response']['docs']
    return results, nmatch


def display(results, start, index, rows, nmatch, short=True):
    """
    Show on the prompt a list of entries from an ADS search.

    Parameters
    ----------
    results: List of dicts
        Subset of entries returned by a querry.
    start: Integer
        Index assigned to first entry in results.
    index: Integer
        First index to display.
    rows: Integer
        Number of entries to display.
    nmatch: Integer
        Total number of entries corresponding to querry (not necessarily
        the number of entries in results).
    short: Bool
        Format for author list. If True, truncate with 'et al' after
        the second author.

    Examples
    --------
    >>> import bibmanager.ads_manager as am
    >>> start = index = 0
    >>> querry = 'author:"^cubillos, p" property:refereed'
    >>> results, nmatch = am.search(querry, start=start)
    >>> display(results, start, index, rows, nmatch)
    """
    for result in results[index-start:index-start+rows]:
        title = textwrap.fill(f"Title: {result['title'][0]}", width=78,
            subsequent_indent=' ')
        author_list = [u.parse_name(author) for author in result['author']]
        authors = textwrap.fill(f"Authors: {u.get_authors(author_list, short)}",
            width=78, subsequent_indent=' ')
        adsurl = ("adsurl: https://ui.adsabs.harvard.edu/abs/"
                  + f"{result['bibcode']}")
        bibcode = f"\n{u.BOLD}bibcode{u.END}: {result['bibcode']}"
        print(f"\n{title}\n{authors}\n{adsurl}{bibcode}")
    if index + rows < nmatch:
        more = " To show the next set, execute:\nbibm ads-search -n"
    else:
        more = ""
    print(f"\nShowing entries {index+1}--{min(index+rows, nmatch)} out of "
          f"{nmatch} matches.{more}")


def add_bibtex(input_bibcodes, input_keys, eprints=None, dois=None,
               update_keys=True, base=None):
    """
    Add bibtex entries from a list of ADS bibcodes, with specified keys.
    New entries will replace old ones without asking if they are
    duplicates.

    Parameters
    ----------
    input_bibcodes: List of strings
        A list of ADS bibcodes.
    input_keys: List of strings
        BibTeX keys to assign to each bibcode.
    eprints: List of strings
        List of ArXiv IDs corresponding to the input bibcodes.
        Defaults to an empty list.
    dois: List of strings
        List of DOIs corresponding to the input bibcodes.
        Defaults to an empty list.
    update_keys: Bool
        If True, attempt to update keys of entries that were updated
        from arxiv to published versions.
    base: List of Bib() objects
        If None, merge new entries into the bibmanager database.
        If not None, merge new entries into base.

    Returns
    -------
    bibs: List of Bib() objects
        Updated list of BibTeX entries.

    Examples
    --------
    >>> import bibmanager.ads_manager as am
    >>> # A successful add call:
    >>> bibcodes = ['1925PhDT.........1P']
    >>> keys = ['Payne1925phdStellarAtmospheres']
    >>> am.add_bibtex(bibcodes, keys)
    >>> # A failing add call:
    >>> bibcodes = ['1925PhDT....X....1P']
    >>> am.add_bibtex(bibcodes, keys)
    Error: There were no entries found for the input bibcodes.

    >>> # A successful add call with multiple entries:
    >>> bibcodes = ['1925PhDT.........1P', '2018MNRAS.481.5286F']
    >>> keys = ['Payne1925phdStellarAtmospheres', 'FolsomEtal2018mnrasHD219134']
    >>> am.add_bibtex(bibcodes, keys)
    >>> # A partially failing call will still add those that succeed:
    >>> bibcodes = ['1925PhDT.....X...1P', '2018MNRAS.481.5286F']
    >>> am.add_bibtex(bibcodes, keys)
    Warning: bibcode '1925PhDT.....X...1P' not found.
    """
    token = cm.get('ads_token')
    # Mutable defaults are a shared-state hazard; materialize fresh lists:
    if eprints is None:
        eprints = []
    if dois is None:
        dois = []
    # Keep the originals untouched (copies will be modified):
    bibcodes, keys = input_bibcodes.copy(), input_keys.copy()

    # Make request:
    r = requests.post("https://api.adsabs.harvard.edu/v1/export/bibtex",
        headers={"Authorization": f'Bearer {token}',
                 "Content-type": "application/json"},
        data=json.dumps({"bibcode":bibcodes}))
    resp = r.json()
    # No valid outputs:
    if 'error' in resp:
        if resp['error'] == 'Unauthorized':
            print('\nError: Unauthorized access to ADS. Check that the ADS '
                  'token is valid.')
        elif resp['error'] == 'no result from solr':
            print("\nError: There were no entries found for the input bibcodes.")
        else:
            print("\nError: ADS request returned an error message:"
                  f"\n{resp['error']}")
        return

    # Keep counts of things:
    nfound = int(resp['msg'].split()[1])
    nreqs = len(bibcodes)
    # Split output into separate BibTeX entries (keep as strings):
    results = resp["export"].strip().split("\n\n")

    new_keys = []
    new_bibs = []
    founds = [False for _ in bibcodes]
    arxiv_updates = 0
    # Match results to bibcodes,keys.  Iterating in reverse lets us
    # remove the current (matched) element without disturbing the
    # indices still to be visited:
    for result in reversed(results):
        ibib = None
        new = bm.Bib(result)
        rkey = new.key
        doi = new.doi
        eprint = new.eprint
        # Output bibcode is input bibcode:
        if rkey in bibcodes:
            ibib = bibcodes.index(rkey)
            new_key = keys[ibib]
        # Else, check for bibcode updates in remaining bibcodes:
        elif eprint is not None and eprint in eprints:
            ibib = eprints.index(eprint)
        elif doi is not None and doi in dois:
            ibib = dois.index(doi)
        if ibib is not None:
            new_key = keys[ibib]
            updated_key = key_update(new_key, rkey, bibcodes[ibib])
            if update_keys and updated_key.lower() != new_key.lower():
                new_key = updated_key
                new_keys.append([keys[ibib], new_key])
            if 'arXiv' in bibcodes[ibib] and 'arXiv' not in new.bibcode:
                arxiv_updates += 1
            new.update_key(new_key)
            new_bibs.append(new)
            founds[ibib] = True
            results.remove(result)

    # Warnings:
    if nfound < nreqs or len(results) > 0:
        warning = u.BANNER + "Warning:\n"
        # bibcodes not found:
        missing = [bibcode
            for bibcode,found in zip(bibcodes, founds)
            if not found]
        if nfound < nreqs:
            warning += '\nThere were bibcodes unmatched or not found in ADS:\n - '
            warning += '\n - '.join(missing) + "\n"
        # bibcodes not matched:
        if len(results) > 0:
            warning += '\nThese ADS results did not match input bibcodes:\n\n'
            warning += '\n\n'.join(results) + "\n"
        warning += u.BANNER
        print(warning)

    # Add to bibmanager database or base:
    updated = bm.merge(new=new_bibs, take='new', base=base)
    print('(Not counting updated references)')
    # Report arXiv updates:
    if arxiv_updates > 0:
        print(f"\nThere were {arxiv_updates} entries updated from ArXiv to "
              "their peer-reviewed version.")
    if len(new_keys) > 0:
        new_keys = [f" {old} -> {new}"
            for old,new in new_keys
            if old != new]
        if len(new_keys) > 0:
            print("These entries changed their key:\n"
                  + "\n".join(new_keys))
    return updated


def update(update_keys=True, base=None):
    """
    Do an ADS querry by bibcode for all entries that have an ADS bibcode.
    Replacing old entries with the new ones.  The main use of this
    function is to update arxiv version of articles.

    Parameters
    ----------
    update_keys: Bool
        If True, attempt to update keys of entries that were updated
        from arxiv to published versions.
    base: List of Bib() objects
        If None, update the bibmanager database; else, update base.

    Returns
    -------
    bibs: List of Bib() objects
        Updated list of BibTeX entries.
    """
    if base is None:
        bibs = bm.load()
    else:
        bibs = base
    # Filter once instead of re-scanning the database for each field:
    ads_bibs = [bib for bib in bibs if bib.bibcode is not None]
    keys = [bib.key for bib in ads_bibs]
    bibcodes = [bib.bibcode for bib in ads_bibs]
    eprints = [bib.eprint for bib in ads_bibs]
    dois = [bib.doi for bib in ads_bibs]
    # Querry-replace:
    bibs = add_bibtex(bibcodes, keys, eprints, dois, update_keys, base=base)
    return bibs


def key_update(key, bibcode, alternate_bibcode):
    r"""
    Update key with year and journal of arxiv version of a key.

    This function will search and update the year in a key,
    and the journal if the key contains the word 'arxiv' (case
    insensitive).  The function extracts the info from the old and
    new bibcodes.  ADS bibcode format:
    http://adsabs.github.io/help/actions/bibcode

    Examples
    --------
    >>> import bibmanager.ads_manager as am
    >>> key = 'BeaulieuEtal2010arxivGJ436b'
    >>> bibcode           = '2011ApJ...731...16B'
    >>> alternate_bibcode = '2010arXiv1007.0324B'
    >>> new_key = am.key_update(key, bibcode, alternate_bibcode)
    >>> print(f'{key}\n{new_key}')
    BeaulieuEtal2010arxivGJ436b
    BeaulieuEtal2011apjGJ436b
    >>> key = 'CubillosEtal2018arXivRetrievals'
    >>> bibcode           = '2019A&A...550A.100B'
    >>> alternate_bibcode = '2018arXiv123401234B'
    >>> new_key = am.key_update(key, bibcode, alternate_bibcode)
    >>> print(f'{key}\n{new_key}')
    CubillosEtal2018arXivRetrievals
    CubillosEtal2019aaRetrievals
    """
    old_year = alternate_bibcode[0:4]
    year = bibcode[0:4]
    # Update year (first occurrence only):
    if old_year != year and old_year in key:
        key = key.replace(old_year, year, 1)

    # Journal code occupies bibcode characters 4-9; strip the padding:
    journal = bibcode[4:9].replace('.','').replace('&','').lower()
    # Search for the word 'arxiv' in key:
    ijournal = key.lower().find('arxiv')
    if ijournal >= 0:
        key = "".join([key[:ijournal], journal, key[ijournal+5:]])

    return key
# (c) 2012-2019, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.

import json
import re

import attr
import semantic_version

from galaxy_importer import config
from galaxy_importer import constants
from galaxy_importer.utils.spdx_licenses import is_valid_license_id

SHA1_LEN = 40

# When the config option 'check_required_tags' is enabled, a collection
# must carry at least one tag from this list.
REQUIRED_TAG_LIST = [
    'application',
    'cloud',
    'database',
    'infrastructure',
    'linux',
    'monitoring',
    'networking',
    'security',
    'storage',
    'tools',
    'windows',
]


def convert_none_to_empty_dict(val):
    """Returns an empty dict if val is None."""
    # if val is not a dict or val 'None' return val
    # and let the validators raise errors later
    if val is None:
        return {}
    return val


# Artifact filename: {namespace}-{name}-{version}.tar.gz
_FILENAME_RE = re.compile(
    r'^(?P<namespace>\w+)-(?P<name>\w+)-'
    r'(?P<version>[0-9a-zA-Z.+-]+)\.tar\.gz$'
)


@attr.s(slots=True)
class CollectionFilename(object):
    """Parsed representation of a collection artifact filename."""

    namespace = attr.ib()
    name = attr.ib()
    version = attr.ib(converter=semantic_version.Version)

    def __str__(self):
        return f'{self.namespace}-{self.name}-{self.version}.tar.gz'

    @classmethod
    def parse(cls, filename):
        """Build a CollectionFilename from an artifact filename string.

        Raises ValueError when the filename does not match the
        expected '{namespace}-{name}-{version}.tar.gz' pattern.
        """
        match = _FILENAME_RE.match(filename)
        if not match:
            raise ValueError(
                'Invalid filename. Expected: '
                '{namespace}-{name}-{version}.tar.gz'
            )
        return cls(**match.groupdict())

    @namespace.validator
    @name.validator
    def _validator(self, attribute, value):
        # namespace and name must also satisfy the galaxy name rules.
        if not constants.NAME_REGEXP.match(value):
            raise ValueError(
                'Invalid {0}: {1!r}'.format(attribute.name, value)
            )


@attr.s(frozen=True)
class CollectionInfo(object):
    """Represents collection_info metadata in collection manifest."""

    namespace = attr.ib(default=None)
    name = attr.ib(default=None)
    version = attr.ib(default=None)
    license = attr.ib(factory=list)
    description = attr.ib(default=None)
    repository = attr.ib(default=None)
    documentation = attr.ib(default=None)
    homepage = attr.ib(default=None)
    issues = attr.ib(default=None)
    authors = attr.ib(factory=list)
    tags = attr.ib(factory=list)
    license_file = attr.ib(default=None)
    readme = attr.ib(default=None)
    dependencies = attr.ib(
        factory=dict,
        converter=convert_none_to_empty_dict,
        validator=attr.validators.instance_of(dict))

    @property
    def label(self):
        """'namespace.name' identifier for this collection."""
        return f"{self.namespace}.{self.name}"

    @staticmethod
    def value_error(msg):
        # Single funnel for all metadata validation failures.
        raise ValueError(f"Invalid collection metadata. {msg}") from None

    @namespace.validator
    @name.validator
    @version.validator
    @readme.validator
    @authors.validator
    @repository.validator
    def _check_required(self, attribute, value):
        """Check that value is present."""
        if not value:
            self.value_error(f"'{attribute.name}' is required")

    @namespace.validator
    @name.validator
    def _check_name(self, attribute, value):
        """Check value against name regular expression."""
        if not re.match(constants.NAME_REGEXP, value):
            self.value_error(f"'{attribute.name}' has invalid format: {value}")

    @version.validator
    def _check_version_format(self, attribute, value):
        """Check that version is in semantic version format."""
        if not semantic_version.validate(value):
            self.value_error(
                "Expecting 'version' to be in semantic version "
                f"format, instead found '{value}'.")

    @authors.validator
    @tags.validator
    @license.validator
    def _check_list_of_str(self, attribute, value):
        """Check that value is a list of strings."""
        err_msg = "Expecting '{attr}' to be a list of strings"
        if not isinstance(value, list):
            self.value_error(err_msg.format(attr=attribute.name))
        for list_item in value:
            if not isinstance(list_item, str):
                self.value_error(err_msg.format(attr=attribute.name))

    @license.validator
    def _check_licenses(self, attribute, value):
        """Check that all licenses in license list are valid."""
        invalid_licenses = [id for id in value if not is_valid_license_id(id)]
        if invalid_licenses:
            self.value_error(
                "Expecting 'license' to be a list of valid SPDX license "
                "identifiers, instead found invalid license identifiers: '{}' "
                "in 'license' value {}. "
                "For more info, visit https://spdx.org"
                .format(', '.join(invalid_licenses), value))

    @dependencies.validator
    def _check_dependencies_format(self, attribute, dependencies):
        """Check type and format of dependencies collection and version."""
        for collection, version_spec in dependencies.items():
            if not isinstance(collection, str):
                self.value_error("Expecting dependency to be string")
            if not isinstance(version_spec, str):
                self.value_error("Expecting dependency version to be string")

            try:
                namespace, name = collection.split('.')
            except ValueError:
                self.value_error(f"Invalid dependency format: '{collection}'")

            for value in [namespace, name]:
                if not re.match(constants.NAME_REGEXP, value):
                    self.value_error(
                        f"Invalid dependency format: '{value}' "
                        f"in '{namespace}.{name}'")

            if namespace == self.namespace and name == self.name:
                self.value_error("Cannot have self dependency")

            try:
                semantic_version.SimpleSpec(version_spec)
            except ValueError:
                self.value_error(
                    "Dependency version spec range invalid: "
                    f"{collection} {version_spec}")

    @tags.validator
    def _check_tags(self, attribute, value):
        """Check max tags and check against both tag regular expression
        and required tag list."""
        if value is not None and len(value) > constants.MAX_TAGS_COUNT:
            self.value_error(
                f"Expecting no more than {constants.MAX_TAGS_COUNT} tags "
                "in metadata")
        for tag in value:
            if not re.match(constants.NAME_REGEXP, tag):
                self.value_error(f"'tag' has invalid format: {tag}")
        config_data = config.ConfigFile.load()
        cfg = config.Config(config_data=config_data)
        # NOTE: the joined list is built outside the replacement field to
        # avoid reusing the f-string's own quote character inside it,
        # which is a SyntaxError on Python < 3.12.
        if cfg.check_required_tags and \
                not any(tag in REQUIRED_TAG_LIST for tag in value):
            tag_list = ', '.join(REQUIRED_TAG_LIST)
            self.value_error(
                f'At least one tag required from tag list: {tag_list}'
            )

    @description.validator
    @repository.validator
    @documentation.validator
    @homepage.validator
    @issues.validator
    @license_file.validator
    def _check_non_null_str(self, attribute, value):
        """Check that if value is present, it must be a string."""
        if value is not None and not isinstance(value, str):
            self.value_error(f"'{attribute.name}' must be a string")

    def __attrs_post_init__(self):
        """Checks called post init validation."""
        self._check_license_or_license_file()

    def _check_license_or_license_file(self):
        """Confirm mutually exclusive presence of license or license_file."""
        # Exactly one of the two set: valid.
        if bool(self.license) != bool(self.license_file):
            return
        if self.license and self.license_file:
            self.value_error(
                "The 'license' and 'license_file' keys are mutually exclusive")
        self.value_error(
            "Valid values for 'license' or 'license_file' are required. "
            f"But 'license' ({self.license}) and "
            f"'license_file' ({self.license_file}) were invalid.")


@attr.s(frozen=True)
class CollectionArtifactManifest(object):
    """Represents collection manifest metadata."""

    collection_info = attr.ib(type=CollectionInfo)
    format = attr.ib(default=1)
    file_manifest_file = attr.ib(factory=dict)

    @classmethod
    def parse(cls, data):
        """Build a manifest from a JSON string, validating its
        collection_info via the CollectionInfo constructor."""
        meta = json.loads(data)
        col_info = meta.pop('collection_info', None)
        meta['collection_info'] = CollectionInfo(**col_info)
        return cls(**meta)


@attr.s(frozen=True)
class ResultContentItem(object):
    """A single content item reported in an import result."""

    name = attr.ib()
    content_type = attr.ib()
    description = attr.ib()


@attr.s(frozen=True)
class ImportResult(object):
    """Result of the import process, collection metadata, and contents."""

    metadata = attr.ib(default=None, type=CollectionInfo)
    docs_blob = attr.ib(factory=dict)
    contents = attr.ib(factory=list, type=ResultContentItem)
    custom_license = attr.ib(default=None)


@attr.s
class Content(object):
    """Represents content found in a collection."""

    name = attr.ib()
    content_type = attr.ib(type=constants.ContentType)
    doc_strings = attr.ib(factory=dict)
    description = attr.ib(default=None)
    readme_file = attr.ib(default=None)
    readme_html = attr.ib(default=None)

    def __attrs_post_init__(self):
        """Set description if a plugin has doc_strings populated."""
        if not self.doc_strings:
            return
        if not self.doc_strings.get('doc', None):
            return
        self.description = \
            self.doc_strings['doc'].get('short_description', None)


@attr.s(frozen=True)
class RenderedDocFile(object):
    """Name and html of a documentation file, part of DocsBlob."""

    name = attr.ib(default=None)
    html = attr.ib(default=None)


@attr.s(frozen=True)
class DocsBlobContentItem(object):
    """Documentation for piece of content, part of DocsBlob."""

    content_name = attr.ib()
    content_type = attr.ib()
    doc_strings = attr.ib(factory=dict)
    readme_file = attr.ib(default=None)
    readme_html = attr.ib(default=None)


@attr.s(frozen=True)
class DocsBlob(object):
    """All documentation that is part of a collection."""

    collection_readme = attr.ib(type=RenderedDocFile)
    documentation_files = attr.ib(factory=list, type=RenderedDocFile)
    contents = attr.ib(factory=list, type=DocsBlobContentItem)
# (c) 2012-2019, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.

import json
import re

import attr
import semantic_version

from galaxy_importer import config
from galaxy_importer import constants
from galaxy_importer.utils.spdx_licenses import is_valid_license_id

SHA1_LEN = 40

# Tags checked by CollectionInfo._check_tags when the config option
# 'check_required_tags' is enabled: at least one must be present.
REQUIRED_TAG_LIST = [
    'application',
    'cloud',
    'database',
    'infrastructure',
    'linux',
    'monitoring',
    'networking',
    'security',
    'storage',
    'tools',
    'windows',
]


def convert_none_to_empty_dict(val):
    """Returns an empty dict if val is None."""
    # if val is not a dict or val 'None' return val
    # and let the validators raise errors later
    if val is None:
        return {}
    return val


# Matches artifact filenames of the form {namespace}-{name}-{version}.tar.gz
_FILENAME_RE = re.compile(
    r'^(?P<namespace>\w+)-(?P<name>\w+)-'
    r'(?P<version>[0-9a-zA-Z.+-]+)\.tar\.gz$'
)


@attr.s(slots=True)
class CollectionFilename(object):
    """Parsed representation of a collection artifact filename."""

    namespace = attr.ib()
    name = attr.ib()
    # Converter raises ValueError for non-semver version strings.
    version = attr.ib(converter=semantic_version.Version)

    def __str__(self):
        """Reassemble the canonical artifact filename."""
        return f'{self.namespace}-{self.name}-{self.version}.tar.gz'

    @classmethod
    def parse(cls, filename):
        """Parse an artifact filename; raises ValueError on mismatch."""
        match = _FILENAME_RE.match(filename)
        if not match:
            raise ValueError(
                'Invalid filename. Expected: '
                '{namespace}-{name}-{version}.tar.gz'
            )
        return cls(**match.groupdict())

    @namespace.validator
    @name.validator
    def _validator(self, attribute, value):
        # namespace and name must additionally satisfy the galaxy name rules.
        if not constants.NAME_REGEXP.match(value):
            raise ValueError(
                'Invalid {0}: {1!r}'.format(attribute.name, value)
            )


@attr.s(frozen=True)
class CollectionInfo(object):
    """Represents collection_info metadata in collection manifest."""

    namespace = attr.ib(default=None)
    name = attr.ib(default=None)
    version = attr.ib(default=None)
    license = attr.ib(factory=list)
    description = attr.ib(default=None)
    repository = attr.ib(default=None)
    documentation = attr.ib(default=None)
    homepage = attr.ib(default=None)
    issues = attr.ib(default=None)
    authors = attr.ib(factory=list)
    tags = attr.ib(factory=list)
    license_file = attr.ib(default=None)
    readme = attr.ib(default=None)
    # None is normalized to {} before the instance_of(dict) validator runs.
    dependencies = attr.ib(
        factory=dict,
        converter=convert_none_to_empty_dict,
        validator=attr.validators.instance_of(dict))

    @property
    def label(self):
        """'namespace.name' identifier for this collection."""
        return f"{self.namespace}.{self.name}"

    @staticmethod
    def value_error(msg):
        # Single funnel for all metadata validation failures; 'from None'
        # suppresses the original exception context.
        raise ValueError(f"Invalid collection metadata. {msg}") from None

    @namespace.validator
    @name.validator
    @version.validator
    @readme.validator
    @authors.validator
    @repository.validator
    def _check_required(self, attribute, value):
        """Check that value is present."""
        if not value:
            self.value_error(f"'{attribute.name}' is required")

    @namespace.validator
    @name.validator
    def _check_name(self, attribute, value):
        """Check value against name regular expression."""
        if not re.match(constants.NAME_REGEXP, value):
            self.value_error(f"'{attribute.name}' has invalid format: {value}")

    @version.validator
    def _check_version_format(self, attribute, value):
        """Check that version is in semantic version format."""
        if not semantic_version.validate(value):
            self.value_error(
                "Expecting 'version' to be in semantic version "
                f"format, instead found '{value}'.")

    @authors.validator
    @tags.validator
    @license.validator
    def _check_list_of_str(self, attribute, value):
        """Check that value is a list of strings."""
        err_msg = "Expecting '{attr}' to be a list of strings"
        if not isinstance(value, list):
            self.value_error(err_msg.format(attr=attribute.name))
        for list_item in value:
            if not isinstance(list_item, str):
                self.value_error(err_msg.format(attr=attribute.name))

    @license.validator
    def _check_licenses(self, attribute, value):
        """Check that all licenses in license list are valid SPDX ids."""
        invalid_licenses = [id for id in value if not is_valid_license_id(id)]
        if invalid_licenses:
            self.value_error(
                "Expecting 'license' to be a list of valid SPDX license "
                "identifiers, instead found invalid license identifiers: '{}' "
                "in 'license' value {}. "
                "For more info, visit https://spdx.org"
                .format(', '.join(invalid_licenses), value))

    @dependencies.validator
    def _check_dependencies_format(self, attribute, dependencies):
        """Check type and format of dependencies collection and version."""
        for collection, version_spec in dependencies.items():
            if not isinstance(collection, str):
                self.value_error("Expecting depencency to be string")
            if not isinstance(version_spec, str):
                self.value_error("Expecting depencency version to be string")

            # Dependency keys are 'namespace.name' dotted pairs.
            try:
                namespace, name = collection.split('.')
            except ValueError:
                self.value_error(f"Invalid dependency format: '{collection}'")

            for value in [namespace, name]:
                if not re.match(constants.NAME_REGEXP, value):
                    self.value_error(
                        f"Invalid dependency format: '{value}' "
                        f"in '{namespace}.{name}'")

            # A collection cannot depend on itself.
            if namespace == self.namespace and name == self.name:
                self.value_error("Cannot have self dependency")

            # Version spec must parse as a semver range (e.g. '>=1.0.0').
            try:
                semantic_version.SimpleSpec(version_spec)
            except ValueError:
                self.value_error(
                    "Dependency version spec range invalid: "
                    f"{collection} {version_spec}")

    @tags.validator
    def _check_tags(self, attribute, value):
        """Check max tags and check against both tag regular expression
        and required tag list."""
        if value is not None and len(value) > constants.MAX_TAGS_COUNT:
            self.value_error(
                f"Expecting no more than {constants.MAX_TAGS_COUNT} tags "
                "in metadata")
        for tag in value:
            if not re.match(constants.NAME_REGEXP, tag):
                self.value_error(f"'tag' has invalid format: {tag}")
        # Required-tag enforcement is config-driven (loaded per validation).
        config_data = config.ConfigFile.load()
        cfg = config.Config(config_data=config_data)
        if cfg.check_required_tags and \
                (not any(tag in REQUIRED_TAG_LIST for tag in value)):
            self.value_error(
                f'At least one tag required from tag list: {", ".join(REQUIRED_TAG_LIST)}'
            )

    @description.validator
    @repository.validator
    @documentation.validator
    @homepage.validator
    @issues.validator
    @license_file.validator
    @repository.validator
    def _check_non_null_str(self, attribute, value):
        """Check that if value is present, it must be a string."""
        if value is not None and not isinstance(value, str):
            self.value_error(f"'{attribute.name}' must be a string")

    def __attrs_post_init__(self):
        """Checks called post init validation."""
        self._check_license_or_license_file()

    def _check_license_or_license_file(self):
        """Confirm mutually exclusive presence of license or license_file."""
        # Exactly one of the two is set: valid, nothing to report.
        if bool(self.license) != bool(self.license_file):
            return
        if self.license and self.license_file:
            self.value_error(
                "The 'license' and 'license_file' keys are mutually exclusive")
        self.value_error(
            "Valid values for 'license' or 'license_file' are required. "
            f"But 'license' ({self.license}) and "
            f"'license_file' ({self.license_file}) were invalid.")


@attr.s(frozen=True)
class CollectionArtifactManifest(object):
    """Represents collection manifest metadata."""

    collection_info = attr.ib(type=CollectionInfo)
    format = attr.ib(default=1)
    file_manifest_file = attr.ib(factory=dict)

    @classmethod
    def parse(cls, data):
        """Build a manifest from a JSON string; collection_info is
        validated by the CollectionInfo constructor."""
        meta = json.loads(data)
        col_info = meta.pop('collection_info', None)
        meta['collection_info'] = CollectionInfo(**col_info)
        return cls(**meta)


@attr.s(frozen=True)
class ResultContentItem(object):
    """A single content item reported in an import result."""

    name = attr.ib()
    content_type = attr.ib()
    description = attr.ib()


@attr.s(frozen=True)
class ImportResult(object):
    """Result of the import process, collection metadata, and contents."""

    metadata = attr.ib(default=None, type=CollectionInfo)
    docs_blob = attr.ib(factory=dict)
    contents = attr.ib(factory=list, type=ResultContentItem)
    custom_license = attr.ib(default=None)


@attr.s
class Content(object):
    """Represents content found in a collection."""

    name = attr.ib()
    content_type = attr.ib(type=constants.ContentType)
    doc_strings = attr.ib(factory=dict)
    description = attr.ib(default=None)
    readme_file = attr.ib(default=None)
    readme_html = attr.ib(default=None)

    def __attrs_post_init__(self):
        """Set description if a plugin has doc_strings populated."""
        if not self.doc_strings:
            return
        if not self.doc_strings.get('doc', None):
            return
        self.description = \
            self.doc_strings['doc'].get('short_description', None)


@attr.s(frozen=True)
class RenderedDocFile(object):
    """Name and html of a documentation file, part of DocsBlob."""

    name = attr.ib(default=None)
    html = attr.ib(default=None)


@attr.s(frozen=True)
class DocsBlobContentItem(object):
    """Documentation for piece of content, part of DocsBlob."""

    content_name = attr.ib()
    content_type = attr.ib()
    doc_strings = attr.ib(factory=dict)
    readme_file = attr.ib(default=None)
    readme_html = attr.ib(default=None)


@attr.s(frozen=True)
class DocsBlob(object):
    """All documentation that is part of a collection."""

    collection_readme = attr.ib(type=RenderedDocFile)
    documentation_files = attr.ib(factory=list, type=RenderedDocFile)
    contents = attr.ib(factory=list, type=DocsBlobContentItem)
class pipe(object): def __init__(self): self.buf = [] self.last_output = 0 def next(self): element = self.buf[0] self.buf = self.buf[1:] return element def peek(self): if len(self.buf) == 0: return None return self.buf[0] def write(self,element): self.last_output = element self.buf.append(element) def get_last_output(self): return self.last_output class computer(object): def __init__(self,inpipe, outpipe, debug = False): self.debug = debug self.total_counter = 0 self.relative_base = 0 self.finished = False self.waiting_on_input = False self.cur = 0 self.input_pipe = inpipe self.output_pipe = outpipe self.program = [2,380,379,385,1008,2663,456801,381,1005,381,12,99,109,2664,1101,0,0,383,1101,0,0,382,20101,0,382,1,20102,1,383,2,21102,37,1,0,1105,1,578,4,382,4,383,204,1,1001,382,1,382,1007,382,44,381,1005,381,22,1001,383,1,383,1007,383,23,381,1005,381,18,1006,385,69,99,104,-1,104,0,4,386,3,384,1007,384,0,381,1005,381,94,107,0,384,381,1005,381,108,1106,0,161,107,1,392,381,1006,381,161,1101,-1,0,384,1106,0,119,1007,392,42,381,1006,381,161,1102,1,1,384,21001,392,0,1,21102,1,21,2,21102,1,0,3,21102,138,1,0,1105,1,549,1,392,384,392,21001,392,0,1,21101,0,21,2,21102,1,3,3,21101,0,161,0,1106,0,549,1101,0,0,384,20001,388,390,1,20101,0,389,2,21102,1,180,0,1105,1,578,1206,1,213,1208,1,2,381,1006,381,205,20001,388,390,1,21001,389,0,2,21101,0,205,0,1106,0,393,1002,390,-1,390,1102,1,1,384,20102,1,388,1,20001,389,391,2,21102,1,228,0,1105,1,578,1206,1,261,1208,1,2,381,1006,381,253,20101,0,388,1,20001,389,391,2,21102,1,253,0,1105,1,393,1002,391,-1,391,1101,1,0,384,1005,384,161,20001,388,390,1,20001,389,391,2,21102,1,279,0,1105,1,578,1206,1,316,1208,1,2,381,1006,381,304,20001,388,390,1,20001,389,391,2,21101,0,304,0,1105,1,393,1002,390,-1,390,1002,391,-1,391,1102,1,1,384,1005,384,161,21001,388,0,1,20102,1,389,2,21102,1,0,3,21101,338,0,0,1105,1,549,1,388,390,388,1,389,391,389,20102,1,388,1,20102,1,389,2,21102,1,4,3,21101,0,365,0,1106,0,549,1007,389,22,381,1005,381,75,104,-1,104,0,104,0,99,
0,1,0,0,0,0,0,0,315,20,18,1,1,22,109,3,22101,0,-2,1,21202,-1,1,2,21102,0,1,3,21101,0,414,0,1106,0,549,22102,1,-2,1,21202,-1,1,2,21101,429,0,0,1106,0,601,1202,1,1,435,1,386,0,386,104,-1,104,0,4,386,1001,387,-1,387,1005,387,451,99,109,-3,2105,1,0,109,8,22202,-7,-6,-3,22201,-3,-5,-3,21202,-4,64,-2,2207,-3,-2,381,1005,381,492,21202,-2,-1,-1,22201,-3,-1,-3,2207,-3,-2,381,1006,381,481,21202,-4,8,-2,2207,-3,-2,381,1005,381,518,21202,-2,-1,-1,22201,-3,-1,-3,2207,-3,-2,381,1006,381,507,2207,-3,-4,381,1005,381,540,21202,-4,-1,-1,22201,-3,-1,-3,2207,-3,-4,381,1006,381,529,21202,-3,1,-7,109,-8,2106,0,0,109,4,1202,-2,44,566,201,-3,566,566,101,639,566,566,2102,1,-1,0,204,-3,204,-2,204,-1,109,-4,2106,0,0,109,3,1202,-1,44,594,201,-2,594,594,101,639,594,594,20101,0,0,-2,109,-3,2106,0,0,109,3,22102,23,-2,1,22201,1,-1,1,21102,1,509,2,21101,264,0,3,21102,1012,1,4,21102,1,630,0,1106,0,456,21201,1,1651,-2,109,-3,2106,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,2,0,0,2,0,2,0,0,2,0,2,0,0,2,2,0,2,0,2,2,0,0,0,0,2,0,0,2,2,2,0,2,0,2,0,2,2,2,0,1,1,0,0,0,2,2,0,0,0,0,2,2,2,0,0,2,0,2,0,0,0,0,2,2,2,2,0,0,2,0,2,2,2,2,0,0,0,2,0,2,0,0,0,1,1,0,2,0,0,0,2,0,2,2,2,0,0,2,0,2,0,0,0,0,2,2,0,0,2,2,0,0,2,0,0,0,0,2,2,0,2,0,0,2,0,0,0,1,1,0,0,2,0,2,2,0,0,0,2,0,2,2,2,0,2,0,2,2,2,2,0,2,2,2,0,0,0,0,0,2,0,2,0,2,0,2,2,0,0,2,0,1,1,0,0,0,0,0,2,0,2,2,0,0,2,0,2,0,2,2,0,0,0,0,2,2,0,2,2,2,0,2,2,0,2,2,0,0,2,2,0,0,0,2,0,1,1,0,0,2,2,2,0,0,0,0,0,2,2,0,0,2,0,0,0,2,2,0,2,2,0,0,2,0,0,2,0,0,0,2,2,0,2,2,2,0,2,0,0,1,1,0,0,2,0,0,2,0,2,2,2,2,2,0,2,0,0,0,0,2,2,2,2,2,2,2,0,2,2,2,2,2,2,2,0,2,0,0,0,2,2,2,0,1,1,0,2,0,0,2,2,2,0,2,2,2,0,2,2,0,0,0,2,0,2,2,0,2,0,2,2,2,0,2,0,0,0,0,0,0,2,0,2,2,2,2,0,1,1,0,0,2,2,2,2,0,2,2,2,2,0,2,2,2,0,0,0,2,0,0,0,0,0,2,2,2,2,2,2,0,0,2,0,2,2,2,2,2,0,0,0,1,1,0,0,2,2,2,2,2,0,0,2,2,0,0,2,2,0,2,0,0,0,0,2,2,0,2,0,2,2,2,2,0,2,0,0,2,2,0,2,2,0,2,0,1,1,0,2,0,2,2,0,0,2,0,2,0,2,2,0,2
,0,2,0,2,2,0,0,2,2,2,2,0,2,2,2,0,2,2,0,0,0,2,2,0,0,2,0,1,1,0,2,0,0,2,2,2,2,2,0,0,2,0,0,2,2,2,0,2,0,2,0,2,0,0,0,2,0,0,0,0,2,2,2,2,0,2,2,0,0,0,0,1,1,0,0,2,0,0,2,2,0,2,0,2,0,2,2,0,0,2,0,2,0,0,2,2,2,2,2,2,0,0,0,0,2,2,2,2,2,0,2,0,2,2,0,1,1,0,0,0,2,2,2,2,2,2,2,0,0,2,0,0,0,0,2,0,2,2,2,2,0,2,0,2,0,0,2,0,2,2,2,2,0,0,0,2,2,2,0,1,1,0,0,0,2,2,0,2,0,2,2,0,2,0,2,0,0,0,2,0,2,0,2,2,0,2,0,0,2,0,0,2,0,2,0,2,2,2,0,0,2,2,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,78,97,10,89,31,40,53,97,63,60,92,10,54,27,53,42,36,34,79,30,8,70,22,20,18,67,79,30,81,50,67,46,39,15,72,26,35,61,6,36,2,26,65,94,82,27,37,6,71,66,84,19,69,5,62,89,57,49,1,9,59,67,30,74,71,37,66,77,43,4,59,42,85,4,87,1,24,64,85,25,29,67,97,15,22,6,34,97,97,47,22,19,40,89,45,36,93,77,26,85,30,40,65,21,45,91,18,77,45,13,74,18,47,67,79,1,31,22,1,96,94,60,44,56,79,64,74,56,91,79,41,23,9,57,9,86,63,82,55,92,63,63,94,73,76,40,88,18,26,66,29,27,20,1,94,90,43,11,67,33,27,47,34,73,65,67,77,54,92,84,6,29,41,8,8,38,83,36,74,29,26,70,68,57,54,38,75,37,24,64,30,89,43,61,6,4,65,81,39,85,91,22,28,17,47,95,52,40,76,77,81,52,59,19,37,90,23,33,5,82,3,64,46,70,22,24,9,96,97,69,48,66,58,97,51,15,86,6,23,7,35,52,57,3,79,82,71,87,64,91,93,69,77,95,1,57,5,2,65,35,57,14,35,12,14,60,45,52,67,32,26,93,63,54,45,8,48,83,5,49,95,60,78,98,54,62,9,1,39,57,63,82,52,90,64,38,95,8,12,72,22,53,78,63,72,65,59,1,87,95,81,79,38,92,61,60,59,3,39,31,47,69,70,6,55,44,49,54,49,50,11,87,85,89,15,70,58,5,87,65,79,86,92,98,49,73,8,79,30,55,4,30,11,55,80,28,63,28,33,9,49,70,34,83,29,97,67,65,8
9,50,88,29,40,5,3,11,87,85,43,2,51,18,58,39,81,8,15,2,42,95,64,8,76,60,73,67,30,28,11,84,56,73,14,66,43,21,40,31,48,11,65,27,9,37,60,91,34,11,83,45,9,77,70,97,9,13,68,20,17,15,6,13,44,59,51,91,73,60,37,40,18,69,48,14,44,96,71,21,27,90,9,91,14,80,38,69,69,52,28,15,54,63,46,32,78,54,79,95,83,16,44,29,26,92,31,51,66,14,94,49,1,93,43,57,50,82,45,95,83,74,50,87,47,55,62,31,1,88,1,77,59,64,26,48,22,61,56,20,54,59,62,3,59,28,98,45,53,47,72,73,72,43,30,23,94,10,76,63,63,8,30,92,25,61,61,32,64,25,57,61,95,81,23,67,28,59,48,68,21,85,48,32,93,98,50,89,27,46,38,63,38,87,76,76,10,71,36,91,2,47,2,36,37,90,25,97,27,71,67,77,4,11,57,68,87,94,12,83,91,94,92,35,49,46,4,31,64,39,12,92,26,12,75,29,11,5,83,8,23,73,62,74,55,75,38,40,90,73,71,38,15,75,10,38,55,74,82,13,32,55,90,47,6,25,65,88,85,40,13,66,54,39,82,19,15,18,74,19,54,70,30,56,28,2,20,50,44,51,7,4,79,97,90,71,97,5,25,95,22,36,61,30,16,68,61,23,22,60,93,9,92,98,40,41,11,47,7,57,15,51,51,77,22,32,4,27,10,76,76,50,81,96,46,28,38,69,41,43,47,86,66,54,22,33,45,75,75,51,37,62,62,25,71,35,49,93,44,18,92,39,32,11,31,96,2,33,94,45,14,82,57,79,81,57,6,19,63,35,11,55,18,38,22,43,82,76,35,7,21,74,50,83,7,55,94,23,79,85,20,4,65,18,12,62,35,74,23,20,96,71,25,95,45,95,4,18,82,71,79,4,12,41,44,23,8,86,6,78,5,54,68,60,12,73,18,95,31,86,23,5,36,40,97,35,48,28,15,9,27,54,14,22,97,63,41,37,12,20,38,41,27,70,35,10,89,31,90,44,46,44,49,66,71,58,74,7,24,6,96,68,27,16,89,80,1,38,26,88,60,47,27,46,32,34,44,74,51,70,13,57,14,31,40,71,55,22,87,23,9,37,38,18,17,34,84,84,49,74,81,31,4,45,11,71,89,16,56,91,61,61,67,92,14,88,89,10,11,77,38,40,89,76,7,5,74,54,64,97,25,20,1,41,9,41,97,1,31,21,96,98,88,52,71,25,62,42,8,91,84,43,75,37,22,32,58,87,22,6,13,62,48,85,81,48,70,3,13,93,88,52,7,66,84,27,37,21,62,72,40,30,28,12,88,48,47,96,98,47,76,80,98,42,25,72,13,15,31,81,40,16,85,77,82,41,67,93,73,58,86,68,85,28,60,13,87,9,12,40,20,4,92,51,456801] def write_positional(self,position,argument): while position >= len(self.program): self.program.append(0) 
self.program[position] = argument def read_positional(self,position): while position >= len(self.program): self.program.append(0) return self.program[position] def read_absolute(self,position): return position def write_relative_base(self,position,argument): while position+self.relative_base >= len(self.program): self.program.append(0) self.program[position+self.relative_base] = argument def read_relative_base(self,position): while position+self.relative_base >= len(self.program): self.program.append(0) return self.program[position+self.relative_base] def get_accessors(self,mode): if mode == 0: #positional mode return (lambda a,b: self.write_positional(a,b),lambda a: self.read_positional(a)) if mode == 1: #absolute mode return (lambda a,b: None,lambda a: self.read_absolute(a)) if mode == 2: #relative base mode return (lambda a,b: self.write_relative_base(a,b), lambda a: self.read_relative_base(a)) def get_opcode(self,code): strcode = str(code) while len(strcode) < 5: strcode = '0' + strcode return (int(strcode[0]), int(strcode[1]), int(strcode[2]), int(strcode[3])*10+int(strcode[4])) def is_finished(self): return self.finished def calculate(self): self.total_counter = self.total_counter + 1 if self.program[self.cur] == 99: if self.debug: print('Exit') self.finished = True return while self.cur+3 >= len(self.program): self.program.append(0) (third_mode, second_mode, first_mode, opcode) = self.get_opcode(self.program[self.cur]) if self.debug: print(f'[{self.total_counter}|{self.cur}]: {self.program[self.cur]} = first:{'positional' if first_mode == 0 else 'absolute' if first_mode == 1 else 'relative base'}, second:{'positional' if second_mode == 0 else 'absolute' if second_mode == 1 else 'relative base'}, third:{'positional' if third_mode == 0 else 'absolute' if third_mode == 1 else 'relative base'} opcode:{opcode}') first_accessors = self.get_accessors(first_mode) (first_write,first_read) = first_accessors second_accessors = self.get_accessors(second_mode) 
(second_write,second_read) = second_accessors third_accessors = self.get_accessors(third_mode) third_write, third_read = third_accessors first_node = self.program[self.cur+1] second_node = self.program[self.cur+2] third_node = self.program[self.cur+3] if opcode == 1: if self.debug: print('add') if self.debug: print(f'{self.program[self.cur]},{self.program[self.cur+1]},{self.program[self.cur+2]},{self.program[self.cur+3]}') third_write(third_node, first_read(first_node)+second_read(second_node)) if self.debug: print(f'{first_read(first_node)}+{second_read(second_node)}->program[{third_node}]') self.cur = self.cur + 4 elif opcode == 2: if self.debug: print('multiply') if self.debug: print(f'{self.program[self.cur]},{self.program[self.cur+1]},{self.program[self.cur+2]},{self.program[self.cur+3]}') third_write(third_node, first_read(first_node)*second_read(second_node)) if self.debug: print(f'{first_read(first_node)}*{second_read(second_node)}->program[{third_node}]') self.cur = self.cur + 4 elif opcode == 3: #input if self.debug: print('input') if self.debug: print(f'{self.program[self.cur]},{self.program[self.cur+1]}') if self.input_pipe.peek() is None: self.waiting_on_input = True return self.waiting_on_input = False next_input = self.input_pipe.next() first_write(first_node,next_input) if self.debug: print(f'input:{next_input}') self.cur = self.cur + 2 elif opcode == 4: #output if self.debug: print('output') if self.debug: print(f'{self.program[self.cur]},{self.program[self.cur+1]}') output_value = first_read(first_node) self.output_pipe.write(output_value) if self.debug: print(f'out: {output_value}') self.cur = self.cur + 2 elif opcode == 5: #jump if not zero if self.debug: print('jump if not zero') if self.debug: print(f'{self.program[self.cur]},{self.program[self.cur+1]},{self.program[self.cur+2]}') self.cur = second_read(second_node) if first_read(first_node) != 0 else self.cur + 3 if self.debug: if first_read(first_node) != 0: print(f'{first_read(first_node)} 
!= 0, cur = {second_read(second_node)}') else: print(f'{first_read(first_node)} == 0, no jump') elif opcode == 6: #jump if zero if self.debug: print('jump if zero') if self.debug: print(f'{self.program[self.cur]},{self.program[self.cur+1]},{self.program[self.cur+2]}') self.cur = second_read(second_node) if first_read(first_node) == 0 else self.cur + 3 if self.debug: if first_read(first_node) == 0: print(f'{first_read(first_node)} == 0, cur = {second_read(second_node)}') else: print(f'{first_read(first_node)} != 0, no jump') elif opcode == 7: #less than if self.debug: print('less than') if self.debug: print(f'{self.program[self.cur]},{self.program[self.cur+1]},{self.program[self.cur+2]},{self.program[self.cur+3]}') third_write(third_node, 1 if first_read(first_node) < second_read(second_node) else 0) if self.debug: if first_read(first_node) < second_read(second_node): print(f'{first_read(first_node)} < {second_read(second_node)}, position {third_node} = 1') else: print(f'{first_read(first_node)} >= {second_read(second_node)}, position {third_node} = 0') self.cur = self.cur + 4 elif opcode == 8: #equal if self.debug: print('equal') if self.debug: print(f'{self.program[self.cur]},{self.program[self.cur+1]},{self.program[self.cur+2]},{self.program[self.cur+3]}') third_write(third_node, 1 if first_read(first_node) == second_read(second_node) else 0) if self.debug: if first_read(first_node) == second_read(second_node): print(f'{first_read(first_node)} == {second_read(second_node)}, position {third_node} = 1') else: print(f'{first_read(first_node)} != {second_read(second_node)}, position {third_node} = 0') self.cur = self.cur + 4 elif opcode == 9: if self.debug: print('alter relative base') if self.debug: print(f'{self.program[self.cur]},{self.program[self.cur+1]}') if self.debug: print(f'relative base {self.relative_base} +/- {first_read(first_node)} = {self.relative_base+first_read(first_node)}') self.relative_base = self.relative_base+first_read(first_node) self.cur = 
self.cur + 2 else: raise Exception(self.program[self.cur]) x_min = 0 y_min = 0 x_max = 0 y_max = 0 screen = {} score = 0 ball_position = (0,0) paddle_position = (0,0) inpipe = pipe() outpipe = pipe() computer = computer(inpipe,outpipe, False) while not computer.finished: if len(outpipe.buf) == 3: x = outpipe.next() x_min = min(x,x_min) x_max = max(x,x_max) y = outpipe.next() y_min = min(y,y_min) y_max = max(y,y_max) instruction = outpipe.next() if instruction == 0: screen[(x,y)] = ' ' elif instruction == 1: screen[(x,y)] = '|' elif instruction == 2: screen[(x,y)] = '#' if instruction == 3: screen[(x,y)] = '-' paddle_position = (x,y) elif instruction == 4: screen[(x,y)] = '0' ball_position = (x,y) else: score = instruction if computer.waiting_on_input: #buf = '' #for y in range(y_min,y_max+1): # for x in range(x_min,x_max+1): # if (x,y) not in screen: # buf += ' ' # else: # buf += screen[(x,y)] # buf += '\n' #print(buf) if ball_position[0] < paddle_position[0]: inpipe.write(-1) elif ball_position[0] > paddle_position[0]: inpipe.write(1) else: inpipe.write(0) computer.calculate() print(score) pass
class pipe(object): def __init__(self): self.buf = [] self.last_output = 0 def next(self): element = self.buf[0] self.buf = self.buf[1:] return element def peek(self): if len(self.buf) == 0: return None return self.buf[0] def write(self,element): self.last_output = element self.buf.append(element) def get_last_output(self): return self.last_output class computer(object): def __init__(self,inpipe, outpipe, debug = False): self.debug = debug self.total_counter = 0 self.relative_base = 0 self.finished = False self.waiting_on_input = False self.cur = 0 self.input_pipe = inpipe self.output_pipe = outpipe self.program = [2,380,379,385,1008,2663,456801,381,1005,381,12,99,109,2664,1101,0,0,383,1101,0,0,382,20101,0,382,1,20102,1,383,2,21102,37,1,0,1105,1,578,4,382,4,383,204,1,1001,382,1,382,1007,382,44,381,1005,381,22,1001,383,1,383,1007,383,23,381,1005,381,18,1006,385,69,99,104,-1,104,0,4,386,3,384,1007,384,0,381,1005,381,94,107,0,384,381,1005,381,108,1106,0,161,107,1,392,381,1006,381,161,1101,-1,0,384,1106,0,119,1007,392,42,381,1006,381,161,1102,1,1,384,21001,392,0,1,21102,1,21,2,21102,1,0,3,21102,138,1,0,1105,1,549,1,392,384,392,21001,392,0,1,21101,0,21,2,21102,1,3,3,21101,0,161,0,1106,0,549,1101,0,0,384,20001,388,390,1,20101,0,389,2,21102,1,180,0,1105,1,578,1206,1,213,1208,1,2,381,1006,381,205,20001,388,390,1,21001,389,0,2,21101,0,205,0,1106,0,393,1002,390,-1,390,1102,1,1,384,20102,1,388,1,20001,389,391,2,21102,1,228,0,1105,1,578,1206,1,261,1208,1,2,381,1006,381,253,20101,0,388,1,20001,389,391,2,21102,1,253,0,1105,1,393,1002,391,-1,391,1101,1,0,384,1005,384,161,20001,388,390,1,20001,389,391,2,21102,1,279,0,1105,1,578,1206,1,316,1208,1,2,381,1006,381,304,20001,388,390,1,20001,389,391,2,21101,0,304,0,1105,1,393,1002,390,-1,390,1002,391,-1,391,1102,1,1,384,1005,384,161,21001,388,0,1,20102,1,389,2,21102,1,0,3,21101,338,0,0,1105,1,549,1,388,390,388,1,389,391,389,20102,1,388,1,20102,1,389,2,21102,1,4,3,21101,0,365,0,1106,0,549,1007,389,22,381,1005,381,75,104,-1,104,0,104,0,99,
0,1,0,0,0,0,0,0,315,20,18,1,1,22,109,3,22101,0,-2,1,21202,-1,1,2,21102,0,1,3,21101,0,414,0,1106,0,549,22102,1,-2,1,21202,-1,1,2,21101,429,0,0,1106,0,601,1202,1,1,435,1,386,0,386,104,-1,104,0,4,386,1001,387,-1,387,1005,387,451,99,109,-3,2105,1,0,109,8,22202,-7,-6,-3,22201,-3,-5,-3,21202,-4,64,-2,2207,-3,-2,381,1005,381,492,21202,-2,-1,-1,22201,-3,-1,-3,2207,-3,-2,381,1006,381,481,21202,-4,8,-2,2207,-3,-2,381,1005,381,518,21202,-2,-1,-1,22201,-3,-1,-3,2207,-3,-2,381,1006,381,507,2207,-3,-4,381,1005,381,540,21202,-4,-1,-1,22201,-3,-1,-3,2207,-3,-4,381,1006,381,529,21202,-3,1,-7,109,-8,2106,0,0,109,4,1202,-2,44,566,201,-3,566,566,101,639,566,566,2102,1,-1,0,204,-3,204,-2,204,-1,109,-4,2106,0,0,109,3,1202,-1,44,594,201,-2,594,594,101,639,594,594,20101,0,0,-2,109,-3,2106,0,0,109,3,22102,23,-2,1,22201,1,-1,1,21102,1,509,2,21101,264,0,3,21102,1012,1,4,21102,1,630,0,1106,0,456,21201,1,1651,-2,109,-3,2106,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,2,0,0,2,0,2,0,0,2,0,2,0,0,2,2,0,2,0,2,2,0,0,0,0,2,0,0,2,2,2,0,2,0,2,0,2,2,2,0,1,1,0,0,0,2,2,0,0,0,0,2,2,2,0,0,2,0,2,0,0,0,0,2,2,2,2,0,0,2,0,2,2,2,2,0,0,0,2,0,2,0,0,0,1,1,0,2,0,0,0,2,0,2,2,2,0,0,2,0,2,0,0,0,0,2,2,0,0,2,2,0,0,2,0,0,0,0,2,2,0,2,0,0,2,0,0,0,1,1,0,0,2,0,2,2,0,0,0,2,0,2,2,2,0,2,0,2,2,2,2,0,2,2,2,0,0,0,0,0,2,0,2,0,2,0,2,2,0,0,2,0,1,1,0,0,0,0,0,2,0,2,2,0,0,2,0,2,0,2,2,0,0,0,0,2,2,0,2,2,2,0,2,2,0,2,2,0,0,2,2,0,0,0,2,0,1,1,0,0,2,2,2,0,0,0,0,0,2,2,0,0,2,0,0,0,2,2,0,2,2,0,0,2,0,0,2,0,0,0,2,2,0,2,2,2,0,2,0,0,1,1,0,0,2,0,0,2,0,2,2,2,2,2,0,2,0,0,0,0,2,2,2,2,2,2,2,0,2,2,2,2,2,2,2,0,2,0,0,0,2,2,2,0,1,1,0,2,0,0,2,2,2,0,2,2,2,0,2,2,0,0,0,2,0,2,2,0,2,0,2,2,2,0,2,0,0,0,0,0,0,2,0,2,2,2,2,0,1,1,0,0,2,2,2,2,0,2,2,2,2,0,2,2,2,0,0,0,2,0,0,0,0,0,2,2,2,2,2,2,0,0,2,0,2,2,2,2,2,0,0,0,1,1,0,0,2,2,2,2,2,0,0,2,2,0,0,2,2,0,2,0,0,0,0,2,2,0,2,0,2,2,2,2,0,2,0,0,2,2,0,2,2,0,2,0,1,1,0,2,0,2,2,0,0,2,0,2,0,2,2,0,2
,0,2,0,2,2,0,0,2,2,2,2,0,2,2,2,0,2,2,0,0,0,2,2,0,0,2,0,1,1,0,2,0,0,2,2,2,2,2,0,0,2,0,0,2,2,2,0,2,0,2,0,2,0,0,0,2,0,0,0,0,2,2,2,2,0,2,2,0,0,0,0,1,1,0,0,2,0,0,2,2,0,2,0,2,0,2,2,0,0,2,0,2,0,0,2,2,2,2,2,2,0,0,0,0,2,2,2,2,2,0,2,0,2,2,0,1,1,0,0,0,2,2,2,2,2,2,2,0,0,2,0,0,0,0,2,0,2,2,2,2,0,2,0,2,0,0,2,0,2,2,2,2,0,0,0,2,2,2,0,1,1,0,0,0,2,2,0,2,0,2,2,0,2,0,2,0,0,0,2,0,2,0,2,2,0,2,0,0,2,0,0,2,0,2,0,2,2,2,0,0,2,2,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,78,97,10,89,31,40,53,97,63,60,92,10,54,27,53,42,36,34,79,30,8,70,22,20,18,67,79,30,81,50,67,46,39,15,72,26,35,61,6,36,2,26,65,94,82,27,37,6,71,66,84,19,69,5,62,89,57,49,1,9,59,67,30,74,71,37,66,77,43,4,59,42,85,4,87,1,24,64,85,25,29,67,97,15,22,6,34,97,97,47,22,19,40,89,45,36,93,77,26,85,30,40,65,21,45,91,18,77,45,13,74,18,47,67,79,1,31,22,1,96,94,60,44,56,79,64,74,56,91,79,41,23,9,57,9,86,63,82,55,92,63,63,94,73,76,40,88,18,26,66,29,27,20,1,94,90,43,11,67,33,27,47,34,73,65,67,77,54,92,84,6,29,41,8,8,38,83,36,74,29,26,70,68,57,54,38,75,37,24,64,30,89,43,61,6,4,65,81,39,85,91,22,28,17,47,95,52,40,76,77,81,52,59,19,37,90,23,33,5,82,3,64,46,70,22,24,9,96,97,69,48,66,58,97,51,15,86,6,23,7,35,52,57,3,79,82,71,87,64,91,93,69,77,95,1,57,5,2,65,35,57,14,35,12,14,60,45,52,67,32,26,93,63,54,45,8,48,83,5,49,95,60,78,98,54,62,9,1,39,57,63,82,52,90,64,38,95,8,12,72,22,53,78,63,72,65,59,1,87,95,81,79,38,92,61,60,59,3,39,31,47,69,70,6,55,44,49,54,49,50,11,87,85,89,15,70,58,5,87,65,79,86,92,98,49,73,8,79,30,55,4,30,11,55,80,28,63,28,33,9,49,70,34,83,29,97,67,65,8
9,50,88,29,40,5,3,11,87,85,43,2,51,18,58,39,81,8,15,2,42,95,64,8,76,60,73,67,30,28,11,84,56,73,14,66,43,21,40,31,48,11,65,27,9,37,60,91,34,11,83,45,9,77,70,97,9,13,68,20,17,15,6,13,44,59,51,91,73,60,37,40,18,69,48,14,44,96,71,21,27,90,9,91,14,80,38,69,69,52,28,15,54,63,46,32,78,54,79,95,83,16,44,29,26,92,31,51,66,14,94,49,1,93,43,57,50,82,45,95,83,74,50,87,47,55,62,31,1,88,1,77,59,64,26,48,22,61,56,20,54,59,62,3,59,28,98,45,53,47,72,73,72,43,30,23,94,10,76,63,63,8,30,92,25,61,61,32,64,25,57,61,95,81,23,67,28,59,48,68,21,85,48,32,93,98,50,89,27,46,38,63,38,87,76,76,10,71,36,91,2,47,2,36,37,90,25,97,27,71,67,77,4,11,57,68,87,94,12,83,91,94,92,35,49,46,4,31,64,39,12,92,26,12,75,29,11,5,83,8,23,73,62,74,55,75,38,40,90,73,71,38,15,75,10,38,55,74,82,13,32,55,90,47,6,25,65,88,85,40,13,66,54,39,82,19,15,18,74,19,54,70,30,56,28,2,20,50,44,51,7,4,79,97,90,71,97,5,25,95,22,36,61,30,16,68,61,23,22,60,93,9,92,98,40,41,11,47,7,57,15,51,51,77,22,32,4,27,10,76,76,50,81,96,46,28,38,69,41,43,47,86,66,54,22,33,45,75,75,51,37,62,62,25,71,35,49,93,44,18,92,39,32,11,31,96,2,33,94,45,14,82,57,79,81,57,6,19,63,35,11,55,18,38,22,43,82,76,35,7,21,74,50,83,7,55,94,23,79,85,20,4,65,18,12,62,35,74,23,20,96,71,25,95,45,95,4,18,82,71,79,4,12,41,44,23,8,86,6,78,5,54,68,60,12,73,18,95,31,86,23,5,36,40,97,35,48,28,15,9,27,54,14,22,97,63,41,37,12,20,38,41,27,70,35,10,89,31,90,44,46,44,49,66,71,58,74,7,24,6,96,68,27,16,89,80,1,38,26,88,60,47,27,46,32,34,44,74,51,70,13,57,14,31,40,71,55,22,87,23,9,37,38,18,17,34,84,84,49,74,81,31,4,45,11,71,89,16,56,91,61,61,67,92,14,88,89,10,11,77,38,40,89,76,7,5,74,54,64,97,25,20,1,41,9,41,97,1,31,21,96,98,88,52,71,25,62,42,8,91,84,43,75,37,22,32,58,87,22,6,13,62,48,85,81,48,70,3,13,93,88,52,7,66,84,27,37,21,62,72,40,30,28,12,88,48,47,96,98,47,76,80,98,42,25,72,13,15,31,81,40,16,85,77,82,41,67,93,73,58,86,68,85,28,60,13,87,9,12,40,20,4,92,51,456801] def write_positional(self,position,argument): while position >= len(self.program): self.program.append(0) 
self.program[position] = argument def read_positional(self,position): while position >= len(self.program): self.program.append(0) return self.program[position] def read_absolute(self,position): return position def write_relative_base(self,position,argument): while position+self.relative_base >= len(self.program): self.program.append(0) self.program[position+self.relative_base] = argument def read_relative_base(self,position): while position+self.relative_base >= len(self.program): self.program.append(0) return self.program[position+self.relative_base] def get_accessors(self,mode): if mode == 0: #positional mode return (lambda a,b: self.write_positional(a,b),lambda a: self.read_positional(a)) if mode == 1: #absolute mode return (lambda a,b: None,lambda a: self.read_absolute(a)) if mode == 2: #relative base mode return (lambda a,b: self.write_relative_base(a,b), lambda a: self.read_relative_base(a)) def get_opcode(self,code): strcode = str(code) while len(strcode) < 5: strcode = '0' + strcode return (int(strcode[0]), int(strcode[1]), int(strcode[2]), int(strcode[3])*10+int(strcode[4])) def is_finished(self): return self.finished def calculate(self): self.total_counter = self.total_counter + 1 if self.program[self.cur] == 99: if self.debug: print('Exit') self.finished = True return while self.cur+3 >= len(self.program): self.program.append(0) (third_mode, second_mode, first_mode, opcode) = self.get_opcode(self.program[self.cur]) if self.debug: print(f'[{self.total_counter}|{self.cur}]: {self.program[self.cur]} = first:{"positional" if first_mode == 0 else "absolute" if first_mode == 1 else "relative base"}, second:{"positional" if second_mode == 0 else "absolute" if second_mode == 1 else "relative base"}, third:{"positional" if third_mode == 0 else "absolute" if third_mode == 1 else "relative base"} opcode:{opcode}') first_accessors = self.get_accessors(first_mode) (first_write,first_read) = first_accessors second_accessors = self.get_accessors(second_mode) 
(second_write,second_read) = second_accessors third_accessors = self.get_accessors(third_mode) third_write, third_read = third_accessors first_node = self.program[self.cur+1] second_node = self.program[self.cur+2] third_node = self.program[self.cur+3] if opcode == 1: if self.debug: print('add') if self.debug: print(f'{self.program[self.cur]},{self.program[self.cur+1]},{self.program[self.cur+2]},{self.program[self.cur+3]}') third_write(third_node, first_read(first_node)+second_read(second_node)) if self.debug: print(f'{first_read(first_node)}+{second_read(second_node)}->program[{third_node}]') self.cur = self.cur + 4 elif opcode == 2: if self.debug: print('multiply') if self.debug: print(f'{self.program[self.cur]},{self.program[self.cur+1]},{self.program[self.cur+2]},{self.program[self.cur+3]}') third_write(third_node, first_read(first_node)*second_read(second_node)) if self.debug: print(f'{first_read(first_node)}*{second_read(second_node)}->program[{third_node}]') self.cur = self.cur + 4 elif opcode == 3: #input if self.debug: print('input') if self.debug: print(f'{self.program[self.cur]},{self.program[self.cur+1]}') if self.input_pipe.peek() is None: self.waiting_on_input = True return self.waiting_on_input = False next_input = self.input_pipe.next() first_write(first_node,next_input) if self.debug: print(f'input:{next_input}') self.cur = self.cur + 2 elif opcode == 4: #output if self.debug: print('output') if self.debug: print(f'{self.program[self.cur]},{self.program[self.cur+1]}') output_value = first_read(first_node) self.output_pipe.write(output_value) if self.debug: print(f'out: {output_value}') self.cur = self.cur + 2 elif opcode == 5: #jump if not zero if self.debug: print('jump if not zero') if self.debug: print(f'{self.program[self.cur]},{self.program[self.cur+1]},{self.program[self.cur+2]}') self.cur = second_read(second_node) if first_read(first_node) != 0 else self.cur + 3 if self.debug: if first_read(first_node) != 0: print(f'{first_read(first_node)} 
!= 0, cur = {second_read(second_node)}') else: print(f'{first_read(first_node)} == 0, no jump') elif opcode == 6: #jump if zero if self.debug: print('jump if zero') if self.debug: print(f'{self.program[self.cur]},{self.program[self.cur+1]},{self.program[self.cur+2]}') self.cur = second_read(second_node) if first_read(first_node) == 0 else self.cur + 3 if self.debug: if first_read(first_node) == 0: print(f'{first_read(first_node)} == 0, cur = {second_read(second_node)}') else: print(f'{first_read(first_node)} != 0, no jump') elif opcode == 7: #less than if self.debug: print('less than') if self.debug: print(f'{self.program[self.cur]},{self.program[self.cur+1]},{self.program[self.cur+2]},{self.program[self.cur+3]}') third_write(third_node, 1 if first_read(first_node) < second_read(second_node) else 0) if self.debug: if first_read(first_node) < second_read(second_node): print(f'{first_read(first_node)} < {second_read(second_node)}, position {third_node} = 1') else: print(f'{first_read(first_node)} >= {second_read(second_node)}, position {third_node} = 0') self.cur = self.cur + 4 elif opcode == 8: #equal if self.debug: print('equal') if self.debug: print(f'{self.program[self.cur]},{self.program[self.cur+1]},{self.program[self.cur+2]},{self.program[self.cur+3]}') third_write(third_node, 1 if first_read(first_node) == second_read(second_node) else 0) if self.debug: if first_read(first_node) == second_read(second_node): print(f'{first_read(first_node)} == {second_read(second_node)}, position {third_node} = 1') else: print(f'{first_read(first_node)} != {second_read(second_node)}, position {third_node} = 0') self.cur = self.cur + 4 elif opcode == 9: if self.debug: print('alter relative base') if self.debug: print(f'{self.program[self.cur]},{self.program[self.cur+1]}') if self.debug: print(f'relative base {self.relative_base} +/- {first_read(first_node)} = {self.relative_base+first_read(first_node)}') self.relative_base = self.relative_base+first_read(first_node) self.cur = 
self.cur + 2 else: raise Exception(self.program[self.cur]) x_min = 0 y_min = 0 x_max = 0 y_max = 0 screen = {} score = 0 ball_position = (0,0) paddle_position = (0,0) inpipe = pipe() outpipe = pipe() computer = computer(inpipe,outpipe, False) while not computer.finished: if len(outpipe.buf) == 3: x = outpipe.next() x_min = min(x,x_min) x_max = max(x,x_max) y = outpipe.next() y_min = min(y,y_min) y_max = max(y,y_max) instruction = outpipe.next() if instruction == 0: screen[(x,y)] = ' ' elif instruction == 1: screen[(x,y)] = '|' elif instruction == 2: screen[(x,y)] = '#' if instruction == 3: screen[(x,y)] = '-' paddle_position = (x,y) elif instruction == 4: screen[(x,y)] = '0' ball_position = (x,y) else: score = instruction if computer.waiting_on_input: #buf = '' #for y in range(y_min,y_max+1): # for x in range(x_min,x_max+1): # if (x,y) not in screen: # buf += ' ' # else: # buf += screen[(x,y)] # buf += '\n' #print(buf) if ball_position[0] < paddle_position[0]: inpipe.write(-1) elif ball_position[0] > paddle_position[0]: inpipe.write(1) else: inpipe.write(0) computer.calculate() print(score) pass
# -*- coding: utf-8 -*-
from ..interpreter import model, NodeContext

# Python models for autorally_gazebo
# Derived from: https://github.com/AutoRally/autorally/tree/melodic-devel/autorally_gazebo/nodes


@model('autorally_gazebo', 'autorally_controller.py')
def autorally_controller(c: NodeContext) -> None:
    """Architecture model for the autorally_controller node.

    Declares the parameters the node reads, the topics it subscribes
    to and publishes, and the services it provides.
    """
    c.read("~left_front_wheel/steering_link_name", "left_steering_link")
    c.read("~right_front_wheel/steering_link_name", "right_steering_link")
    left_steering_controller_name = \
        c.read("~left_front_wheel/steering_controller_name",
               "left_steering_controller")
    assert isinstance(left_steering_controller_name, str)
    right_steering_controller_name = \
        c.read("~right_front_wheel/steering_controller_name",
               "right_steering_controller")
    assert isinstance(right_steering_controller_name, str)

    c.read("~left_rear_wheel/link_name", "left_wheel")
    c.read("~right_rear_wheel/link_name", "right_wheel")

    # Axle controller names have no defaults; they must be configured.
    left_front_axle_controller_name = \
        c.read("~left_front_wheel/axle_controller_name")
    assert isinstance(left_front_axle_controller_name, str)
    right_front_axle_controller_name = \
        c.read("~right_front_wheel/axle_controller_name")
    assert isinstance(right_front_axle_controller_name, str)
    left_rear_axle_controller_name = \
        c.read("~left_rear_wheel/axle_controller_name")
    assert isinstance(left_rear_axle_controller_name, str)
    right_rear_axle_controller_name = \
        c.read("~right_rear_wheel/axle_controller_name")
    assert isinstance(right_rear_axle_controller_name, str)

    c.read("~left_front_wheel/diameter", 1.0)
    c.read("~right_front_wheel/diameter")
    c.read("~left_rear_wheel/diameter")
    c.read("~right_rear_wheel/diameter")

    # https://github.com/AutoRally/autorally/blob/c2692f2970da6874ad9ddfeea3908adaf05b4b09/autorally_gazebo/nodes/autorally_controller.py#L258
    chassis_command_priorities = \
        c.read("~chassisCommandProirities", [])  # Note, misspelling is deliberate
    shock_absorbers = c.read("~shock_absorbers", [])

    c.read("~cmd_timeout", 0.5)
    c.read("~publishing_frequency", 30.0)

    # BUG FIX: the message type was misspelled "autorally_msgs/chassisCOmmand";
    # every other reference in this file uses "autorally_msgs/chassisCommand".
    c.sub("chassisCommand", "autorally_msgs/chassisCommand")
    c.sub("runstop", "autorally_msgs/runstop")

    c.pub(f"{left_steering_controller_name}/command", "std_msgs/Float64")
    c.pub(f"{right_steering_controller_name}/command", "std_msgs/Float64")
    c.pub(f"{left_front_axle_controller_name}/command", "std_msgs/Float64")
    c.pub(f"{right_front_axle_controller_name}/command", "std_msgs/Float64")
    c.pub(f"{left_rear_axle_controller_name}/command", "std_msgs/Float64")
    c.pub(f"{right_rear_axle_controller_name}/command", "std_msgs/Float64")

    assert isinstance(shock_absorbers, list)
    for shocker in shock_absorbers:
        assert isinstance(shocker, dict)
        assert 'controller_name' in shocker
        # BUG FIX: the original nested double quotes inside a double-quoted
        # f-string, a SyntaxError before Python 3.12.
        c.pub(f"{shocker['controller_name']}/command", "std_msgs/Float64")

    # latched = True
    c.pub("~wheelSpeeds", "autorally_msgs/wheelSpeeds")
    c.pub("~chassisState", "autorally_msgs/chassisState")

    # One subscription per prioritised chassis-command source.
    assert isinstance(chassis_command_priorities, list)
    for cmd in chassis_command_priorities:
        c.sub(f"~/{cmd}/chassisCommand", "autorally_msgs/chassisCommand")

    c.sub('~/joint_states', "sensor_msgs/JointState")
    c.provide('~/list_controllers', "controller_manager_msgs/ListControllers")


@model('autorally_gazebo', 'ground_truth_republisher.py')
def ground_truth_republisher(c: NodeContext) -> None:
    """Model for the node that republishes raw ground truth odometry."""
    c.pub('/ground_truth/state', 'nav_msgs/Odometry')
    c.sub('/ground_truth/state_raw', 'nav_msgs/Odometry')
# -*- coding: utf-8 -*- from ..interpreter import model, NodeContext # Python models for autorally_gazebo # Derived from: https://github.com/AutoRally/autorally/tree/melodic-devel/autorally_gazebo/nodes @model('autorally_gazebo', 'autorally_controller.py') def autorally_controller(c: NodeContext) -> None: c.read("~left_front_wheel/steering_link_name", "left_steering_link") c.read("~right_front_wheel/steering_link_name", "right_steering_link") left_steering_controller_name = \ c.read("~left_front_wheel/steering_controller_name", "left_steering_controller") assert isinstance(left_steering_controller_name, str) right_steering_controller_name = \ c.read("~right_front_wheel/steering_controller_name", "right_steering_controller") assert isinstance(right_steering_controller_name, str) c.read("~left_rear_wheel/link_name", "left_wheel") c.read("~right_rear_wheel/link_name", "right_wheel") left_front_axle_controller_name = \ c.read("~left_front_wheel/axle_controller_name") assert isinstance(left_front_axle_controller_name, str) right_front_axle_controller_name = \ c.read("~right_front_wheel/axle_controller_name") assert isinstance(right_front_axle_controller_name, str) left_rear_axle_controller_name = \ c.read("~left_rear_wheel/axle_controller_name") assert isinstance(left_rear_axle_controller_name, str) right_rear_axle_controller_name = \ c.read("~right_rear_wheel/axle_controller_name") assert isinstance(right_rear_axle_controller_name, str) c.read("~left_front_wheel/diameter", 1.0) c.read("~right_front_wheel/diameter") c.read("~left_rear_wheel/diameter") c.read("~right_rear_wheel/diameter") # https://github.com/AutoRally/autorally/blob/c2692f2970da6874ad9ddfeea3908adaf05b4b09/autorally_gazebo/nodes/autorally_controller.py#L258 chassis_command_priorities = \ c.read("~chassisCommandProirities", []) # Note, misspelling is deliberate shock_absorbers = c.read("~shock_absorbers", []) c.read("~cmd_timeout", 0.5) c.read("~publishing_frequency", 30.0) c.sub("chassisCommand", 
"autorally_msgs/chassisCOmmand") c.sub("runstop", "autorally_msgs/runstop") c.pub(f"{left_steering_controller_name}/command", "std_msgs/Float64") c.pub(f"{right_steering_controller_name}/command", "std_msgs/Float64") c.pub(f"{left_front_axle_controller_name}/command", "std_msgs/Float64") c.pub(f"{right_front_axle_controller_name}/command", "std_msgs/Float64") c.pub(f"{left_rear_axle_controller_name}/command", "std_msgs/Float64") c.pub(f"{right_rear_axle_controller_name}/command", "std_msgs/Float64") assert isinstance(shock_absorbers, list) for shocker in shock_absorbers: assert isinstance(shocker, dict) assert 'controller_name' in shocker c.pub(f"{shocker['controller_name']}/command", "std_msgs/Float64") # latched = True c.pub("~wheelSpeeds", "autorally_msgs/wheelSpeeds") c.pub("~chassisState", "autorally_msgs/chassisState") assert isinstance(chassis_command_priorities, list) for cmd in chassis_command_priorities: c.sub(f"~/{cmd}/chassisCommand", "autorally_msgs/chassisCommand") c.sub('~/joint_states', "sensor_msgs/JointState") c.provide('~/list_controllers', "controller_manager_msgs/ListControllers") @model('autorally_gazebo', 'ground_truth_republisher.py') def ground_truth_republisher(c: NodeContext) -> None: c.pub('/ground_truth/state', 'nav_msgs/Odometry') c.sub('/ground_truth/state_raw', 'nav_msgs/Odometry')
""" MIT License Copyright (c) 2021 MShawon Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" import concurrent.futures.thread import os import platform import sys from concurrent.futures import ThreadPoolExecutor, as_completed from random import choice, randint, uniform from time import gmtime, sleep, strftime import undetected_chromedriver as uc from fake_useragent import UserAgent, UserAgentError from selenium import webdriver from selenium.common.exceptions import (ElementClickInterceptedException, ElementNotInteractableException) from selenium.webdriver.common.by import By from selenium.webdriver.common.keys import Keys from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.support.ui import WebDriverWait uc.install() os.system("") class bcolors: HEADER = '\033[95m' OKBLUE = '\033[94m' OKCYAN = '\033[96m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' print(bcolors.OKGREEN + """ Yb dP dP"Yb 88 88 888888 88 88 88""Yb 888888 YbdP dP Yb 88 88 88 88 88 88__dP 88__ 8P Yb dP Y8 8P 88 Y8 8P 88""Yb 88"" dP YbodP `YbodP' 88 `YbodP' 88oodP 888888 Yb dP 88 888888 Yb dP 888888 88""Yb Yb dP 88 88__ Yb db dP 88__ 88__dP YbdP 88 88"" YbdPYbdP 88"" 88"Yb YP 88 888888 YP YP 888888 88 Yb """ + bcolors.ENDC) print(bcolors.OKCYAN + """ [ GitHub : https://github.com/MShawon/YouTube-Viewer ] """ + bcolors.ENDC) print(bcolors.OKCYAN + """ This version has been developed for a project supporter named Anthony Tortolani. """ + bcolors.ENDC) print(bcolors.WARNING + 'Collecting User-Agent...' + bcolors.ENDC) try: ua = UserAgent(use_cache_server=False, verify_ssl=False) except UserAgentError: ua = UserAgent(path='fake_useragent_0.1.11.json') PROXY = None driver = None status = None view = [] duration_dict = {} def load_url(): links = [] print(bcolors.WARNING + 'Loading urls...' 
+ bcolors.ENDC) filename = 'urls.txt' load = open(filename) loaded = [items.rstrip().strip() for items in load] load.close() for lines in loaded: links.append(lines) print(bcolors.OKGREEN + f'{len(links)} url loaded from urls.txt' + bcolors.ENDC) return links def load_search(): search = [] print(bcolors.WARNING + 'Loading queries...' + bcolors.ENDC) filename = 'search.txt' load = open(filename, encoding="utf-8") loaded = [items.rstrip().strip() for items in load] loaded = [[i.strip() for i in items.split(':')] for items in loaded] load.close() for lines in loaded: search.append(lines) print(bcolors.OKGREEN + f'{len(search)} query loaded from search.txt' + bcolors.ENDC) return search def bypassAgree(driver): frame = WebDriverWait(driver, 30).until(EC.element_to_be_clickable( (By.ID, "iframe"))) driver._switch_to.frame(frame) WebDriverWait(driver, 30).until(EC.element_to_be_clickable( (By.ID, "introAgreeButton"))).click() driver.switch_to.default_content() def bypassSignIn(driver): sleep(1) nothanks = WebDriverWait(driver, 30).until(EC.element_to_be_clickable( (By.CLASS_NAME, "style-scope.yt-button-renderer.style-text.size-small"))) nothanks.click() sleep(randint(1, 5)) bypassAgree(driver) def sleeping(): sleep(30) def searchVideo(driver, query): find_video = WebDriverWait(driver, 80).until(EC.element_to_be_clickable( (By.XPATH, f'//*[@title="{query[1]}"]'))) find_video.click() def checkState(driver): try: driver.find_element_by_css_selector('[title^="Pause (k)"]') except: try: driver.find_element_by_css_selector('[title^="Play (k)"]').click() except: driver.find_element_by_css_selector( 'button.ytp-large-play-button.ytp-button').send_keys(Keys.ENTER) def viewVideo(position): try: agent = ua.chrome while OSNAME not in agent: agent = ua.chrome print(bcolors.OKBLUE + f"Tried {position+1} |" + bcolors.OKGREEN + f'{PROXY} --> Searching for videos...' 
+ bcolors.ENDC) if position % 2: method = 1 url = choice(urls) else: method = 2 query = choice(queries) url = f"https://www.youtube.com/results?search_query={query[0].replace(" ", "%20")}" options = webdriver.ChromeOptions() options.headless = background viewport = ['2560,1440', '1920,1080', '1440,900', '1536,864', '1366,768', '1280,1024', '1024,768'] options.add_argument(f"--window-size={choice(viewport)}") options.add_argument("--log-level=3") options.add_experimental_option( "excludeSwitches", ["enable-automation", "enable-logging"]) options.add_experimental_option('useAutomationExtension', False) options.add_argument(f"user-agent={agent}") webdriver.DesiredCapabilities.CHROME['loggingPrefs'] = { 'driver': 'OFF', 'server': 'OFF', 'browser': 'OFF'} webdriver.DesiredCapabilities.CHROME['proxy'] = { "httpProxy": PROXY, "sslProxy": PROXY, "proxyType": "MANUAL", } driver = webdriver.Chrome(options=options) # For testing purposes to see if ip actually changes # To see the result uncomment following these two lines # driver.get('https://ipof.me/') # sleep(30) driver.get(url) try: consent = WebDriverWait(driver, 30).until(EC.element_to_be_clickable( (By.XPATH, "//input[@type='submit' and @value='I agree']"))) consent.submit() except: try: consent = driver.find_element_by_css_selector( 'button.VfPpkd-LgbsSe.VfPpkd-LgbsSe-OWXEXe-k8QpJ.VfPpkd-LgbsSe-OWXEXe-dgl2Hf.nCP5yc.AjY5Oe.DuMIQc.IIdkle') consent.click() except: pass try: if method == 1: play = WebDriverWait(driver, 80).until(EC.element_to_be_clickable( (By.CSS_SELECTOR, "button.ytp-large-play-button.ytp-button"))) play.send_keys(Keys.ENTER) else: searchVideo(driver, query) bypassSignIn(driver) except ElementNotInteractableException: try: bypassSignIn(driver) except ElementClickInterceptedException: bypassAgree(driver) searchVideo(driver, query) except: pass except ElementClickInterceptedException: bypassAgree(driver) searchVideo(driver, query) except: pass checkState(driver) try: video_len = duration_dict[url] except 
KeyError: video_len = 0 WebDriverWait(driver, 80).until( EC.element_to_be_clickable((By.ID, 'movie_player'))) while video_len == 0: video_len = driver.execute_script( "return document.getElementById('movie_player').getDuration()") duration_dict[url] = video_len # Randomizing watch duration between 85% to 95% of total video duration # to avoid pattern and youtube next suggested video video_len = video_len*uniform(.85, .95) duration = strftime("%Hh:%Mm:%Ss", gmtime(video_len)) print(bcolors.OKBLUE + f"Tried {position+1} |" + bcolors.OKGREEN + f' {PROXY} --> Video Found : {url} | Watch Duration : {duration} ' + bcolors.ENDC) checkState(driver) sleep(video_len) driver.quit() view.append(position) print(bcolors.OKCYAN + f'View added : {len(view)}' + bcolors.ENDC) except Exception as e: *_, exc_tb = sys.exc_info() print(bcolors.FAIL + f"Tried {position+1} | Line : {exc_tb.tb_lineno} | " + str(e) + bcolors.ENDC) driver.quit() pass def main(): pool_number = [i for i in range(100000)] with ThreadPoolExecutor(max_workers=threads) as executor: futures = [executor.submit(viewVideo, position) for position in pool_number] try: for future in as_completed(futures): if len(view) == views: print( bcolors.WARNING + f'Amount of views added : {views} | Stopping program...' + bcolors.ENDC) executor._threads.clear() concurrent.futures.thread._threads_queues.clear() break future.result() except KeyboardInterrupt: executor._threads.clear() concurrent.futures.thread._threads_queues.clear() if __name__ == '__main__': OSNAME = platform.system() if OSNAME == 'Darwin': OSNAME = 'Macintosh' urls = load_url() queries = load_search() views = int(input(bcolors.OKBLUE + 'Amount of views : ' + bcolors.ENDC)) gui = str(input( bcolors.WARNING + 'Do you want to run in headless(background) mode? 
(recommended=No) [No/yes] : ' + bcolors.ENDC)).lower() if gui == 'n' or gui == 'no' or gui == '': background = False threads = int( input(bcolors.OKBLUE+'Threads (recommended = 5): ' + bcolors.ENDC)) else: background = True threads = int( input(bcolors.OKBLUE+'Threads (recommended = 10): ' + bcolors.ENDC)) PROXY = input(bcolors.WARNING + 'Enter your Rotating Proxy service Main Gateway : ' + bcolors.ENDC) check = -1 while len(view) < views: try: check += 1 if check == 0: main() else: sleeping() print(bcolors.WARNING + f'Total Checked : {check} times' + bcolors.ENDC) main() except KeyboardInterrupt: sys.exit()
""" MIT License Copyright (c) 2021 MShawon Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" import concurrent.futures.thread import os import platform import sys from concurrent.futures import ThreadPoolExecutor, as_completed from random import choice, randint, uniform from time import gmtime, sleep, strftime import undetected_chromedriver as uc from fake_useragent import UserAgent, UserAgentError from selenium import webdriver from selenium.common.exceptions import (ElementClickInterceptedException, ElementNotInteractableException) from selenium.webdriver.common.by import By from selenium.webdriver.common.keys import Keys from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.support.ui import WebDriverWait uc.install() os.system("") class bcolors: HEADER = '\033[95m' OKBLUE = '\033[94m' OKCYAN = '\033[96m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' print(bcolors.OKGREEN + """ Yb dP dP"Yb 88 88 888888 88 88 88""Yb 888888 YbdP dP Yb 88 88 88 88 88 88__dP 88__ 8P Yb dP Y8 8P 88 Y8 8P 88""Yb 88"" dP YbodP `YbodP' 88 `YbodP' 88oodP 888888 Yb dP 88 888888 Yb dP 888888 88""Yb Yb dP 88 88__ Yb db dP 88__ 88__dP YbdP 88 88"" YbdPYbdP 88"" 88"Yb YP 88 888888 YP YP 888888 88 Yb """ + bcolors.ENDC) print(bcolors.OKCYAN + """ [ GitHub : https://github.com/MShawon/YouTube-Viewer ] """ + bcolors.ENDC) print(bcolors.OKCYAN + """ This version has been developed for a project supporter named Anthony Tortolani. """ + bcolors.ENDC) print(bcolors.WARNING + 'Collecting User-Agent...' + bcolors.ENDC) try: ua = UserAgent(use_cache_server=False, verify_ssl=False) except UserAgentError: ua = UserAgent(path='fake_useragent_0.1.11.json') PROXY = None driver = None status = None view = [] duration_dict = {} def load_url(): links = [] print(bcolors.WARNING + 'Loading urls...' 
+ bcolors.ENDC) filename = 'urls.txt' load = open(filename) loaded = [items.rstrip().strip() for items in load] load.close() for lines in loaded: links.append(lines) print(bcolors.OKGREEN + f'{len(links)} url loaded from urls.txt' + bcolors.ENDC) return links def load_search(): search = [] print(bcolors.WARNING + 'Loading queries...' + bcolors.ENDC) filename = 'search.txt' load = open(filename, encoding="utf-8") loaded = [items.rstrip().strip() for items in load] loaded = [[i.strip() for i in items.split(':')] for items in loaded] load.close() for lines in loaded: search.append(lines) print(bcolors.OKGREEN + f'{len(search)} query loaded from search.txt' + bcolors.ENDC) return search def bypassAgree(driver): frame = WebDriverWait(driver, 30).until(EC.element_to_be_clickable( (By.ID, "iframe"))) driver._switch_to.frame(frame) WebDriverWait(driver, 30).until(EC.element_to_be_clickable( (By.ID, "introAgreeButton"))).click() driver.switch_to.default_content() def bypassSignIn(driver): sleep(1) nothanks = WebDriverWait(driver, 30).until(EC.element_to_be_clickable( (By.CLASS_NAME, "style-scope.yt-button-renderer.style-text.size-small"))) nothanks.click() sleep(randint(1, 5)) bypassAgree(driver) def sleeping(): sleep(30) def searchVideo(driver, query): find_video = WebDriverWait(driver, 80).until(EC.element_to_be_clickable( (By.XPATH, f'//*[@title="{query[1]}"]'))) find_video.click() def checkState(driver): try: driver.find_element_by_css_selector('[title^="Pause (k)"]') except: try: driver.find_element_by_css_selector('[title^="Play (k)"]').click() except: driver.find_element_by_css_selector( 'button.ytp-large-play-button.ytp-button').send_keys(Keys.ENTER) def viewVideo(position): try: agent = ua.chrome while OSNAME not in agent: agent = ua.chrome print(bcolors.OKBLUE + f"Tried {position+1} |" + bcolors.OKGREEN + f'{PROXY} --> Searching for videos...' 
+ bcolors.ENDC) if position % 2: method = 1 url = choice(urls) else: method = 2 query = choice(queries) url = f"https://www.youtube.com/results?search_query={query[0].replace(' ', '%20')}" options = webdriver.ChromeOptions() options.headless = background viewport = ['2560,1440', '1920,1080', '1440,900', '1536,864', '1366,768', '1280,1024', '1024,768'] options.add_argument(f"--window-size={choice(viewport)}") options.add_argument("--log-level=3") options.add_experimental_option( "excludeSwitches", ["enable-automation", "enable-logging"]) options.add_experimental_option('useAutomationExtension', False) options.add_argument(f"user-agent={agent}") webdriver.DesiredCapabilities.CHROME['loggingPrefs'] = { 'driver': 'OFF', 'server': 'OFF', 'browser': 'OFF'} webdriver.DesiredCapabilities.CHROME['proxy'] = { "httpProxy": PROXY, "sslProxy": PROXY, "proxyType": "MANUAL", } driver = webdriver.Chrome(options=options) # For testing purposes to see if ip actually changes # To see the result uncomment following these two lines # driver.get('https://ipof.me/') # sleep(30) driver.get(url) try: consent = WebDriverWait(driver, 30).until(EC.element_to_be_clickable( (By.XPATH, "//input[@type='submit' and @value='I agree']"))) consent.submit() except: try: consent = driver.find_element_by_css_selector( 'button.VfPpkd-LgbsSe.VfPpkd-LgbsSe-OWXEXe-k8QpJ.VfPpkd-LgbsSe-OWXEXe-dgl2Hf.nCP5yc.AjY5Oe.DuMIQc.IIdkle') consent.click() except: pass try: if method == 1: play = WebDriverWait(driver, 80).until(EC.element_to_be_clickable( (By.CSS_SELECTOR, "button.ytp-large-play-button.ytp-button"))) play.send_keys(Keys.ENTER) else: searchVideo(driver, query) bypassSignIn(driver) except ElementNotInteractableException: try: bypassSignIn(driver) except ElementClickInterceptedException: bypassAgree(driver) searchVideo(driver, query) except: pass except ElementClickInterceptedException: bypassAgree(driver) searchVideo(driver, query) except: pass checkState(driver) try: video_len = duration_dict[url] except 
KeyError: video_len = 0 WebDriverWait(driver, 80).until( EC.element_to_be_clickable((By.ID, 'movie_player'))) while video_len == 0: video_len = driver.execute_script( "return document.getElementById('movie_player').getDuration()") duration_dict[url] = video_len # Randomizing watch duration between 85% to 95% of total video duration # to avoid pattern and youtube next suggested video video_len = video_len*uniform(.85, .95) duration = strftime("%Hh:%Mm:%Ss", gmtime(video_len)) print(bcolors.OKBLUE + f"Tried {position+1} |" + bcolors.OKGREEN + f' {PROXY} --> Video Found : {url} | Watch Duration : {duration} ' + bcolors.ENDC) checkState(driver) sleep(video_len) driver.quit() view.append(position) print(bcolors.OKCYAN + f'View added : {len(view)}' + bcolors.ENDC) except Exception as e: *_, exc_tb = sys.exc_info() print(bcolors.FAIL + f"Tried {position+1} | Line : {exc_tb.tb_lineno} | " + str(e) + bcolors.ENDC) driver.quit() pass def main(): pool_number = [i for i in range(100000)] with ThreadPoolExecutor(max_workers=threads) as executor: futures = [executor.submit(viewVideo, position) for position in pool_number] try: for future in as_completed(futures): if len(view) == views: print( bcolors.WARNING + f'Amount of views added : {views} | Stopping program...' + bcolors.ENDC) executor._threads.clear() concurrent.futures.thread._threads_queues.clear() break future.result() except KeyboardInterrupt: executor._threads.clear() concurrent.futures.thread._threads_queues.clear() if __name__ == '__main__': OSNAME = platform.system() if OSNAME == 'Darwin': OSNAME = 'Macintosh' urls = load_url() queries = load_search() views = int(input(bcolors.OKBLUE + 'Amount of views : ' + bcolors.ENDC)) gui = str(input( bcolors.WARNING + 'Do you want to run in headless(background) mode? 
(recommended=No) [No/yes] : ' + bcolors.ENDC)).lower() if gui == 'n' or gui == 'no' or gui == '': background = False threads = int( input(bcolors.OKBLUE+'Threads (recommended = 5): ' + bcolors.ENDC)) else: background = True threads = int( input(bcolors.OKBLUE+'Threads (recommended = 10): ' + bcolors.ENDC)) PROXY = input(bcolors.WARNING + 'Enter your Rotating Proxy service Main Gateway : ' + bcolors.ENDC) check = -1 while len(view) < views: try: check += 1 if check == 0: main() else: sleeping() print(bcolors.WARNING + f'Total Checked : {check} times' + bcolors.ENDC) main() except KeyboardInterrupt: sys.exit()
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from __future__ import print_function from .sql_updates import check_and_insert_user_agent from .sql_updates import get_username_random, get_username_to_unfollow_random from .sql_updates import ( get_usernames_first, get_usernames, get_username_row_count, check_if_userid_exists, ) from .sql_updates import insert_media, insert_username, insert_unfollow_count from .sql_updates import check_already_followed, check_already_unfollowed from .sql_updates import check_and_update, check_already_liked import re import time import sqlite3 import signal import random import logging import json import itertools import datetime import atexit from .userinfo import UserInfo from .unfollow_protocol import unfollow_protocol import importlib import os import sys import pickle python_version_test = f"If you are reading this error, you are not running Python 3.6 or greater. Check 'python --version' or 'python3 --version'." # Required Dependencies and Modules, offer to install them automatically # Keep fake_useragent last, quirk for pythonanywhere required_modules = ["requests", "instaloader", "threading", "fake_useragent"] for modname in required_modules: try: # try to import the module normally and put it in globals globals()[modname] = importlib.import_module(modname) except ImportError as e: if modname is not "fake_useragent": print( f"Cannot continue without module {modname} Please install dependencies in requirements.txt. Exiting." ) quit() class InstaBot: """ Instagram bot v 1.2.2 like_per_day=1000 - How many likes set bot in one day. media_max_like=0 - Don't like media (photo or video) if it have more than media_max_like likes. media_min_like=0 - Don't like media (photo or video) if it have less than media_min_like likes. tag_list = ['cat', 'car', 'dog'] - Tag list to like. max_like_for_one_tag=5 - Like 1 to max_like_for_one_tag times by row. 
log_mod = 0 - Log mod: log_mod = 0 log to console, log_mod = 1 log to file, log_mod = 2 no log. https://github.com/LevPasha/instabot.py """ database_name = None session_file = None follows_db = None follows_db_c = None url = "https://www.instagram.com/" url_tag = "https://www.instagram.com/explore/tags/%s/?__a=1" url_location = "https://www.instagram.com/explore/locations/%s/?__a=1" url_likes = "https://www.instagram.com/web/likes/%s/like/" url_unlike = "https://www.instagram.com/web/likes/%s/unlike/" url_comment = "https://www.instagram.com/web/comments/%s/add/" url_follow = "https://www.instagram.com/web/friendships/%s/follow/" url_unfollow = "https://www.instagram.com/web/friendships/%s/unfollow/" url_login = "https://www.instagram.com/accounts/login/ajax/" url_logout = "https://www.instagram.com/accounts/logout/" url_media_detail = "https://www.instagram.com/p/%s/?__a=1" url_media = "https://www.instagram.com/p/%s/" url_user_detail = "https://www.instagram.com/%s/" api_user_detail = "https://i.instagram.com/api/v1/users/%s/info/" instabot_repo_update = ( "https://github.com/instabot-py/instabot.py/raw/master/version.txt" ) user_agent = "" "" accept_language = "en-US,en;q=0.5" # If instagram ban you - query return 400 error. error_400 = 0 # If you have 3 400 error in row - looks like you banned. error_400_to_ban = 3 # If InstaBot think you are banned - going to sleep. ban_sleep_time = 2 * 60 * 60 # All counter. bot_mode = 0 like_counter = 0 follow_counter = 0 unfollow_counter = 0 comments_counter = 0 current_user = "hajka" current_index = 0 current_id = "abcds" # List of user_id, that bot follow bot_follow_list = [] user_info_list = [] user_list = [] ex_user_list = [] unwanted_username_list = [] is_checked = False is_selebgram = False is_fake_account = False is_active_user = False is_following = False is_follower = False is_rejected = False is_self_checking = False is_by_tag = False is_follower_number = 0 self_following = 0 self_follower = 0 # Log setting. 
logging.basicConfig(filename="errors.log", level=logging.INFO) log_file_path = "" log_file = 0 # Other. user_id = 0 media_by_tag = 0 media_on_feed = [] media_by_user = [] login_status = False by_location = False # Running Times start_at_h = 0 start_at_m = 0 end_at_h = 23 end_at_m = 59 # For new_auto_mod next_iteration = { "Like": 0, "Follow": 0, "Unfollow": 0, "Comments": 0, "Populate": 0, } prog_run = True def __init__( self, login, password, like_per_day=1000, media_max_like=150, media_min_like=0, user_max_follow=0, user_min_follow=0, follow_per_day=0, follow_time=5 * 60 * 60, # Cannot be zero follow_time_enabled=True, unfollow_per_day=0, unfollow_recent_feed=True, start_at_h=0, start_at_m=0, end_at_h=23, end_at_m=59, database_name=None, session_file=None, comment_list=[ ["this", "the", "your"], ["photo", "picture", "pic", "shot", "snapshot"], ["is", "looks", "feels", "is really"], [ "great", "super", "good", "very good", "good", "wow", "WOW", "cool", "GREAT", "magnificent", "magical", "very cool", "stylish", "beautiful", "so beautiful", "so stylish", "so professional", "lovely", "so lovely", "very lovely", "glorious", "so glorious", "very glorious", "adorable", "excellent", "amazing", ], [".", "..", "...", "!", "!!", "!!!"], ], comments_per_day=0, tag_list=["cat", "car", "dog"], max_like_for_one_tag=5, unfollow_break_min=15, unfollow_break_max=30, log_mod=0, proxy="", user_blacklist={}, tag_blacklist=[], unwanted_username_list=[], unfollow_whitelist=[], ): self.session_file = session_file if database_name is not None: self.database_name = database_name else: self.database_name = f"{login.lower()}.db" self.follows_db = sqlite3.connect( self.database_name, timeout=0, isolation_level=None ) self.follows_db_c = self.follows_db.cursor() check_and_update(self) list_of_ua = [ "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FSL 7.0.6.01001)", "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FSL 7.0.7.01001)", "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; 
FSL 7.0.5.01003)", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0", "Mozilla/5.0 (X11; U; Linux x86_64; de; rv:1.9.2.8) Gecko/20100723 Ubuntu/10.04 (lucid) Firefox/3.6.8", "Mozilla/5.0 (Windows NT 5.1; rv:13.0) Gecko/20100101 Firefox/13.0.1", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:11.0) Gecko/20100101 Firefox/11.0", "Mozilla/5.0 (X11; U; Linux x86_64; de; rv:1.9.2.8) Gecko/20100723 Ubuntu/10.04 (lucid) Firefox/3.6.8", "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; .NET CLR 1.0.3705)", "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)", "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)", "Opera/9.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.01", "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727)", "Mozilla/5.0 (Windows NT 5.1; rv:5.0.1) Gecko/20100101 Firefox/5.0.1", "Mozilla/5.0 (Windows NT 6.1; rv:5.0) Gecko/20100101 Firefox/5.02", "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.112 Safari/535.1", "Mozilla/4.0 (compatible; MSIE 6.0; MSIE 5.5; Windows NT 5.0) Opera 7.02 Bork-edition [en]", ] try: fallback = random.sample(list_of_ua, 1) fake_ua = fake_useragent.UserAgent(fallback=fallback[0]) self.user_agent = check_and_insert_user_agent(self, str(fake_ua)) except: fake_ua = random.sample(list_of_ua, 1) self.user_agent = check_and_insert_user_agent(self, str(fake_ua[0])) self.bot_start = datetime.datetime.now() self.bot_start_ts = time.time() self.start_at_h = start_at_h self.start_at_m = start_at_m self.end_at_h = end_at_h self.end_at_m = end_at_m self.unfollow_break_min = unfollow_break_min self.unfollow_break_max = unfollow_break_max self.user_blacklist = user_blacklist self.tag_blacklist = tag_blacklist self.unfollow_whitelist = unfollow_whitelist self.comment_list = comment_list self.instaloader = instaloader.Instaloader() self.unfollow_recent_feed = 
unfollow_recent_feed self.time_in_day = 24 * 60 * 60 # Like self.like_per_day = like_per_day if self.like_per_day != 0: self.like_delay = self.time_in_day / self.like_per_day # Follow self.follow_time = follow_time # Cannot be zero self.follow_time_enabled = follow_time_enabled self.follow_per_day = follow_per_day if self.follow_per_day != 0: self.follow_delay = self.time_in_day / self.follow_per_day # Unfollow self.unfollow_per_day = unfollow_per_day if self.unfollow_per_day != 0: self.unfollow_delay = self.time_in_day / self.unfollow_per_day # Comment self.comments_per_day = comments_per_day if self.comments_per_day != 0: self.comments_delay = self.time_in_day / self.comments_per_day # Don't like if media have more than n likes. self.media_max_like = media_max_like # Don't like if media have less than n likes. self.media_min_like = media_min_like # Don't follow if user have more than n followers. self.user_max_follow = user_max_follow # Don't follow if user have less than n followers. self.user_min_follow = user_min_follow # Auto mod seting: # Default list of tag. self.tag_list = tag_list # Get random tag, from tag_list, and like (1 to n) times. 
self.max_like_for_one_tag = max_like_for_one_tag # log_mod 0 to console, 1 to file self.log_mod = log_mod self.s = requests.Session() self.c = requests.Session() # if you need proxy make something like this: # self.s.proxies = {"https" : "http://proxyip:proxyport"} # by @ageorgios if proxy != "": proxies = {"http": f"http://{proxy}", "https": f"http://{proxy}"} self.s.proxies.update(proxies) self.c.proxies.update(proxies) # convert login to lower self.user_login = login.lower() self.user_password = password self.bot_mode = 0 self.media_by_tag = [] self.media_on_feed = [] self.media_by_user = [] self.current_user_info = "" self.unwanted_username_list = unwanted_username_list now_time = datetime.datetime.now() self.check_for_bot_update() log_string = "Instabot v1.2.2/0 started at %s:" % ( now_time.strftime("%d.%m.%Y %H:%M") ) self.write_log(log_string) self.login() self.populate_user_blacklist() signal.signal(signal.SIGINT, self.cleanup) signal.signal(signal.SIGTERM, self.cleanup) atexit.register(self.cleanup) self.instaload = instaloader.Instaloader() def check_for_bot_update(self): self.write_log("Checking for updates...") try: # CHANGE THIS TO OFFICIAL REPO IF KEPT updated_timestamp = self.c.get(self.instabot_repo_update) current_version_timestamp = open("version.txt", "r") if int(updated_timestamp.text) > int(current_version_timestamp.read()): self.write_log( ">>> UPDATE AVAILABLE <<< Please update Instabot. You are running an older version." 
) else: self.write_log("You are running the latest stable version") except: self.write_log("Could not check for updates") def populate_user_blacklist(self): for user in self.user_blacklist: user_id_url = self.url_user_detail % (user) info = self.s.get(user_id_url) # prevent error if 'Account of user was deleted or link is invalid from json import JSONDecodeError try: all_data = json.loads(info.text) except JSONDecodeError as e: self.write_log( f"Account of user {user} was deleted or link is " "invalid" ) else: # prevent exception if user have no media id_user = all_data["user"]["id"] # Update the user_name with the user_id self.user_blacklist[user] = id_user log_string = f"Blacklisted user {user} added with ID: {id_user}" self.write_log(log_string) time.sleep(5 * random.random()) def login(self): successfulLogin = False self.s.headers.update( { "Accept": "*/*", "Accept-Language": self.accept_language, "Accept-Encoding": "gzip, deflate, br", "Connection": "keep-alive", "Host": "www.instagram.com", "Origin": "https://www.instagram.com", "Referer": "https://www.instagram.com/", "User-Agent": self.user_agent, "X-Instagram-AJAX": "1", "Content-Type": "application/x-www-form-urlencoded", "X-Requested-With": "XMLHttpRequest", } ) if self.session_file is not None and os.path.isfile(self.session_file): self.write_log(f"Found session file {self.session_file}") successfulLogin = True with open(self.session_file, "rb") as i: cookies = pickle.load(i) self.s.cookies.update(cookies) else: self.write_log("Trying to login as {}...".format(self.user_login)) self.login_post = { "username": self.user_login, "password": self.user_password, } r = self.s.get(self.url) csrf_token = re.search('(?<="csrf_token":")\w+', r.text).group(0) self.s.headers.update({"X-CSRFToken": csrf_token}) time.sleep(5 * random.random()) login = self.s.post( self.url_login, data=self.login_post, allow_redirects=True ) if ( login.status_code != 200 and login.status_code != 400 ): # Handling Other Status Codes 
and making debug easier!! self.write_log("Request didn't return 200 as status code!") self.write_log("Here is more info for debbugin or creating an issue") print("=" * 15) print("Response Status: ", login.status_code) print("=" * 15) print("Response Content:\n", login.text) print("=" * 15) print("Response Header:\n", login.headers) print("=" * 15) return loginResponse = login.json() try: self.csrftoken = login.cookies["csrftoken"] self.s.headers.update({"X-CSRFToken": login.cookies["csrftoken"]}) except Exception as e: self.write_log("Something wrong with login") self.write_log(login.text) if loginResponse.get("errors"): self.write_log( "Something is wrong with Instagram! Please try again later..." ) for error in loginResponse["errors"]["error"]: self.write_log(f"Error =>{error}") return if loginResponse.get("message") == "checkpoint_required": try: if "instagram.com" in loginResponse["checkpoint_url"]: challenge_url = loginResponse["checkpoint_url"] else: challenge_url = ( f"https://instagram.com{loginResponse["checkpoint_url"]}" ) self.write_log(f"Challenge required at {challenge_url}") with self.s as clg: clg.headers.update( { "Accept": "*/*", "Accept-Language": self.accept_language, "Accept-Encoding": "gzip, deflate, br", "Connection": "keep-alive", "Host": "www.instagram.com", "Origin": "https://www.instagram.com", "User-Agent": self.user_agent, "X-Instagram-AJAX": "1", "Content-Type": "application/x-www-form-urlencoded", "x-requested-with": "XMLHttpRequest", } ) # Get challenge page challenge_request_explore = clg.get(challenge_url) # Get CSRF Token from challenge page challenge_csrf_token = re.search( '(?<="csrf_token":")\w+', challenge_request_explore.text ).group(0) # Get Rollout Hash from challenge page rollout_hash = re.search( '(?<="rollout_hash":")\w+', challenge_request_explore.text ).group(0) # Ask for option 1 from challenge, which is usually Email or Phone challenge_post = {"choice": 1} # Update headers for challenge submit page 
clg.headers.update({"X-CSRFToken": challenge_csrf_token}) clg.headers.update({"Referer": challenge_url}) # Request instagram to send a code challenge_request_code = clg.post( challenge_url, data=challenge_post, allow_redirects=True ) # User should receive a code soon, ask for it challenge_userinput_code = input( "Challenge Required.\n\nEnter the code sent to your mail/phone: " ) challenge_security_post = { "security_code": int(challenge_userinput_code) } complete_challenge = clg.post( challenge_url, data=challenge_security_post, allow_redirects=True, ) if complete_challenge.status_code != 200: self.write_log("Entered code is wrong, Try again later!") return self.csrftoken = complete_challenge.cookies["csrftoken"] self.s.headers.update( {"X-CSRFToken": self.csrftoken, "X-Instagram-AJAX": "1"} ) successfulLogin = complete_challenge.status_code == 200 except Exception as err: print(f"Login failed, response: \n\n{login.text} {err}") quit() elif loginResponse.get("authenticated") is False: self.write_log("Login error! 
Check your login data!") return else: rollout_hash = re.search('(?<="rollout_hash":")\w+', r.text).group(0) self.s.headers.update({"X-Instagram-AJAX": rollout_hash}) successfulLogin = True # ig_vw=1536; ig_pr=1.25; ig_vh=772; ig_or=landscape-primary; self.s.cookies["csrftoken"] = self.csrftoken self.s.cookies["ig_vw"] = "1536" self.s.cookies["ig_pr"] = "1.25" self.s.cookies["ig_vh"] = "772" self.s.cookies["ig_or"] = "landscape-primary" time.sleep(5 * random.random()) if successfulLogin: r = self.s.get("https://www.instagram.com/") self.csrftoken = re.search('(?<="csrf_token":")\w+', r.text).group(0) self.s.cookies["csrftoken"] = self.csrftoken self.s.headers.update({"X-CSRFToken": self.csrftoken}) finder = r.text.find(self.user_login) if finder != -1: ui = UserInfo() self.user_id = ui.get_user_id_by_login(self.user_login) self.login_status = True log_string = f"{self.user_login} login success!\n" self.write_log(log_string) if self.session_file is not None: self.write_log( f"Saving cookies to session file {self.session_file}" ) with open(self.session_file, "wb") as output: pickle.dump(self.s.cookies, output, pickle.HIGHEST_PROTOCOL) else: self.login_status = False self.write_log("Login error! Check your login data!") if self.session_file is not None and os.path.isfile(self.session_file): try: os.remove(self.session_file) except: self.write_log( "Could not delete session file. Please delete manually" ) self.prog_run = False else: self.write_log("Login error! Connection error!") def logout(self): now_time = datetime.datetime.now() log_string = ( "Logout: likes - %i, follow - %i, unfollow - %i, comments - %i." 
% ( self.like_counter, self.follow_counter, self.unfollow_counter, self.comments_counter, ) ) self.write_log(log_string) work_time = datetime.datetime.now() - self.bot_start log_string = "Bot work time: %s" % (work_time) self.write_log(log_string) try: logout_post = {"csrfmiddlewaretoken": self.csrftoken} logout = self.s.post(self.url_logout, data=logout_post) self.write_log("Logout success!") self.login_status = False except: logging.exception("Logout error!") def cleanup(self, *_): # Unfollow all bot follow if self.follow_counter >= self.unfollow_counter: for i in range(len(self.bot_follow_list)): f = self.bot_follow_list[0] if check_already_unfollowed(self, f[0]): log_string = "Already unfollowed before, skipping: %s" % (f[0]) self.write_log(log_string) else: log_string = "Trying to unfollow: %s" % (f[0]) self.write_log(log_string) self.unfollow_on_cleanup(f[0]) sleeptime = random.randint( self.unfollow_break_min, self.unfollow_break_max ) log_string = "Pausing for %i seconds... %i of %i" % ( sleeptime, self.unfollow_counter, self.follow_counter, ) self.write_log(log_string) time.sleep(sleeptime) self.bot_follow_list.remove(f) # Logout if self.login_status and self.session_file is None: self.logout() self.prog_run = False def get_media_id_by_tag(self, tag): """ Get media ID set, by your hashtag or location """ if self.login_status: if tag.startswith("l:"): tag = tag.replace("l:", "") self.by_location = True log_string = "Get Media by location: %s" % (tag) self.write_log(log_string) if self.login_status == 1: url_location = self.url_location % (tag) try: r = self.s.get(url_location) all_data = json.loads(r.text) self.media_by_tag = list( all_data["graphql"]["location"]["edge_location_to_media"][ "edges" ] ) except: self.media_by_tag = [] self.write_log("Except on get_media!") logging.exception("get_media_id_by_tag") else: return 0 else: log_string = "Get Media by tag: %s" % (tag) self.by_location = False self.write_log(log_string) if self.login_status == 1: 
url_tag = self.url_tag % (tag) try: r = self.s.get(url_tag) all_data = json.loads(r.text) self.media_by_tag = list( all_data["graphql"]["hashtag"]["edge_hashtag_to_media"][ "edges" ] ) except: self.media_by_tag = [] self.write_log("Except on get_media!") logging.exception("get_media_id_by_tag") else: return 0 def get_instagram_url_from_media_id(self, media_id, url_flag=True, only_code=None): """ Get Media Code or Full Url from Media ID Thanks to Nikished """ media_id = int(media_id) if url_flag is False: return "" else: alphabet = ( "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_" ) shortened_id = "" while media_id > 0: media_id, idx = divmod(media_id, 64) shortened_id = alphabet[idx] + shortened_id if only_code: return shortened_id else: return f"instagram.com/p/{shortened_id}/" def get_username_by_media_id(self, media_id): """ Get username by media ID Thanks to Nikished """ if self.login_status: if self.login_status == 1: media_id_url = self.get_instagram_url_from_media_id( int(media_id), only_code=True ) url_media = self.url_media_detail % (media_id_url) try: r = self.s.get(url_media) all_data = json.loads(r.text) username = str( all_data["graphql"]["shortcode_media"]["owner"]["username"] ) self.write_log( "media_id=" + media_id + ", media_id_url=" + media_id_url + ", username_by_media_id=" + username ) return username except: logging.exception("username_by_mediaid exception") return False else: return "" def get_username_by_user_id(self, user_id): if self.login_status: try: profile = instaloader.Profile.from_id(self.instaload.context, user_id) username = profile.username return username except: logging.exception("Except on get_username_by_user_id") return False else: return False def get_userinfo_by_name(self, username): """ Get user info by name """ if self.login_status: if self.login_status == 1: url_info = self.url_user_detail % (username) try: r = self.s.get(url_info) all_data = json.loads(r.text) user_info = all_data["user"] follows = 
user_info["follows"]["count"] follower = user_info["followed_by"]["count"] follow_viewer = user_info["follows_viewer"] if follower > 3000 or follows > 1500: self.write_log( " >>>This is probably Selebgram, Business or Fake account" ) if follow_viewer: return None return user_info except: logging.exception("Except on get_userinfo_by_name") return False else: return False def like_all_exist_media(self, media_size=-1, delay=True): """ Like all media ID that have self.media_by_tag """ if self.login_status: if self.media_by_tag != 0: i = 0 for d in self.media_by_tag: # Media count by this tag. if media_size > 0 or media_size < 0: media_size -= 1 l_c = self.media_by_tag[i]["node"]["edge_liked_by"]["count"] if ( (l_c <= self.media_max_like and l_c >= self.media_min_like) or (self.media_max_like == 0 and l_c >= self.media_min_like) or (self.media_min_like == 0 and l_c <= self.media_max_like) or (self.media_min_like == 0 and self.media_max_like == 0) ): for ( blacklisted_user_name, blacklisted_user_id, ) in self.user_blacklist.items(): if ( self.media_by_tag[i]["node"]["owner"]["id"] == blacklisted_user_id ): self.write_log( f"Not liking media owned by blacklisted user: {blacklisted_user_name}" ) return False if ( self.media_by_tag[i]["node"]["owner"]["id"] == self.user_id ): self.write_log("Keep calm - It's your own media ;)") return False if ( check_already_liked( self, media_id=self.media_by_tag[i]["node"]["id"] ) == 1 ): self.write_log("Keep calm - It's already liked ;)") return False try: if ( len( self.media_by_tag[i]["node"][ "edge_media_to_caption" ]["edges"] ) > 1 ): caption = self.media_by_tag[i]["node"][ "edge_media_to_caption" ]["edges"][0]["node"]["text"].encode( "ascii", errors="ignore" ) tag_blacklist = set(self.tag_blacklist) if sys.version_info[0] == 3: tags = { str.lower((tag.decode("ASCII")).strip("#")) for tag in caption.split() if (tag.decode("ASCII")).startswith("#") } else: tags = { unicode.lower( (tag.decode("ASCII")).strip("#") ) for tag in 
caption.split() if (tag.decode("ASCII")).startswith("#") } if tags.intersection(tag_blacklist): matching_tags = ", ".join( tags.intersection(tag_blacklist) ) self.write_log( f"Not liking media with blacklisted tag(s): {matching_tags}" ) return False except: logging.exception("Except on like_all_exist_media") return False log_string = "Trying to like media: %s" % ( self.media_by_tag[i]["node"]["id"] ) self.write_log(log_string) like = self.like(self.media_by_tag[i]["node"]["id"]) # comment = self.comment(self.media_by_tag[i]['id'], 'Cool!') # follow = self.follow(self.media_by_tag[i]["owner"]["id"]) if like != 0: if like.status_code == 200: # Like, all ok! self.error_400 = 0 self.like_counter += 1 log_string = f"Liked: {self.media_by_tag[i]["node"]["id"]}. Like #{self.like_counter}." insert_media( self, media_id=self.media_by_tag[i]["node"]["id"], status="200", ) self.write_log(log_string) elif like.status_code == 400: log_string = f"Not liked: {like.status_code}" self.write_log(log_string) insert_media( self, media_id=self.media_by_tag[i]["node"]["id"], status="400", ) # Some error. If repeated - can be ban! if self.error_400 >= self.error_400_to_ban: # Look like you banned! time.sleep(self.ban_sleep_time) else: self.error_400 += 1 else: log_string = f"Not liked: {like.status_code}" insert_media( self, media_id=self.media_by_tag[i]["node"]["id"], status=str(like.status_code), ) self.write_log(log_string) return False # Some error. 
i += 1 if delay: time.sleep( self.like_delay * 0.9 + self.like_delay * 0.2 * random.random() ) else: return True else: return False else: return False else: return False else: self.write_log("No media to like!") def like(self, media_id): """ Send http request to like media by ID """ if self.login_status: url_likes = self.url_likes % (media_id) try: like = self.s.post(url_likes) last_liked_media_id = media_id except: logging.exception("Except on like!") like = 0 return like def unlike(self, media_id): """ Send http request to unlike media by ID """ if self.login_status: url_unlike = self.url_unlike % (media_id) try: unlike = self.s.post(url_unlike) except: logging.exception("Except on unlike!") unlike = 0 return unlike def comment(self, media_id, comment_text): """ Send http request to comment """ if self.login_status: comment_post = {"comment_text": comment_text} url_comment = self.url_comment % (media_id) try: comment = self.s.post(url_comment, data=comment_post) # time.sleep(30) if comment.status_code == 200: self.comments_counter += 1 log_string = f"Write: {comment_text}. #{self.comments_counter}." self.write_log(log_string) time.sleep(40) if comment.status_code == 403: time.sleep(150) return comment except: logging.excepmeon("Except on comment!") return False def follow(self, user_id, username=None): """ Send http request to follow """ if self.login_status: url_follow = self.url_follow % (user_id) if username is None: username = self.get_username_by_user_id(user_id=user_id) try: follow = self.s.post(url_follow) if follow.status_code == 200: self.follow_counter += 1 log_string = f"Followed: {user_id} #{self.follow_counter}." 
self.write_log(log_string) insert_username(self, user_id=user_id, username=username) return follow except: logging.exception("Except on follow!") return False def unfollow(self, user_id): """ Send http request to unfollow """ if self.login_status: url_unfollow = self.url_unfollow % (user_id) try: unfollow = self.s.post(url_unfollow) if unfollow.status_code == 200: self.unfollow_counter += 1 log_string = f"Unfollowed: {user_id} #{self.unfollow_counter}." self.write_log(log_string) insert_unfollow_count(self, user_id=user_id) return unfollow except: logging.exception("Exept on unfollow!") return False def unfollow_on_cleanup(self, user_id): """ Unfollow on cleanup by @rjmayott """ if self.login_status: url_unfollow = self.url_unfollow % (user_id) try: unfollow = self.s.post(url_unfollow) if unfollow.status_code == 200: self.unfollow_counter += 1 log_string = f"Unfollow: {user_id} #{self.unfollow_counter} of {self.follow_counter}." self.write_log(log_string) insert_unfollow_count(self, user_id=user_id) else: log_string = ( "Slow Down - Pausing for 5 minutes so we don't get banned!" ) self.write_log(log_string) time.sleep(300) unfollow = self.s.post(url_unfollow) if unfollow.status_code == 200: self.unfollow_counter += 1 log_string = f"Unfollow: {user_id} #{self.unfollow_counter} of {self.follow_counter}." self.write_log(log_string) insert_unfollow_count(self, user_id=user_id) else: log_string = "Still no good :( Skipping and pausing for another 5 minutes" self.write_log(log_string) time.sleep(300) return False return unfollow except: log_string = "Except on unfollow... Looks like a network error" logging.exception(log_string) return False def auto_mod(self): """ Star loop, that get media ID by your tag list, and like it """ if self.login_status: while self.prog_run: random.shuffle(self.tag_list) self.get_media_id_by_tag(random.choice(self.tag_list)) self.like_all_exist_media(random.randint(1, self.max_like_for_one_tag)) self.write_log("Exit Program... 
GoodBye") sys.exit(0) def new_auto_mod(self): while self.prog_run and self.login_status: now = datetime.datetime.now() if datetime.time( self.start_at_h, self.start_at_m ) <= now.time() and now.time() <= datetime.time( self.end_at_h, self.end_at_m ): # ------------------- Get media_id ------------------- if len(self.media_by_tag) == 0: self.get_media_id_by_tag(random.choice(self.tag_list)) self.this_tag_like_count = 0 self.max_tag_like_count = random.randint( 1, self.max_like_for_one_tag ) self.remove_already_liked() # ------------------- Like ------------------- self.new_auto_mod_like() # ------------------- Follow ------------------- self.new_auto_mod_follow() # ------------------- Unfollow ------------------- self.new_auto_mod_unfollow() # ------------------- Comment ------------------- self.new_auto_mod_comments() # Bot iteration in 1 sec time.sleep(3) # print("Tic!") else: print( "!!sleeping until {hour}:{min}".format( hour=self.start_at_h, min=self.start_at_m ), end="\r", ) time.sleep(100) self.write_log("Exit Program... 
GoodBye") sys.exit(0) def remove_already_liked(self): self.write_log("Removing already liked medias..") x = 0 while x < len(self.media_by_tag): if ( check_already_liked(self, media_id=self.media_by_tag[x]["node"]["id"]) == 1 ): self.media_by_tag.remove(self.media_by_tag[x]) else: x += 1 def new_auto_mod_like(self): if ( time.time() > self.next_iteration["Like"] and self.like_per_day != 0 and len(self.media_by_tag) > 0 ): # You have media_id to like: if self.like_all_exist_media(media_size=1, delay=False): # If like go to sleep: self.next_iteration["Like"] = time.time() + self.add_time( self.like_delay ) # Count this tag likes: self.this_tag_like_count += 1 if self.this_tag_like_count >= self.max_tag_like_count: self.media_by_tag = [0] # Del first media_id try: del self.media_by_tag[0] except: print("Could not remove media") def new_auto_mod_follow(self): username = None if time.time() < self.next_iteration["Follow"]: return if ( time.time() > self.next_iteration["Follow"] and self.follow_per_day != 0 and len(self.media_by_tag) > 0 ): if self.media_by_tag[0]["node"]["owner"]["id"] == self.user_id: self.write_log("Keep calm - It's your own profile ;)") return if self.user_min_follow != 0 or self.user_max_follow != 0: try: username = self.get_username_by_user_id( self.media_by_tag[0]["node"]["owner"]["id"] ) url = self.url_user_detail % (username) r = self.s.get(url) all_data = json.loads( re.search( "window._sharedData = (.*?);</script>", r.text, re.DOTALL ).group(1) ) followers = all_data["entry_data"]["ProfilePage"][0]["graphql"][ "user" ]["edge_followed_by"]["count"] if followers < self.user_min_follow: self.write_log( f"Won't follow {username}: does not meet user_min_follow requirement" ) return if self.user_max_follow != 0 and followers > self.user_max_follow: self.write_log( f"Won't follow {username}: does not meet user_max_follow requirement" ) return except Exception: pass if ( check_already_followed( self, user_id=self.media_by_tag[0]["node"]["owner"]["id"] 
) == 1 ): self.write_log( f"Already followed before {self.media_by_tag[0]["node"]["owner"]["id"]}" ) self.next_iteration["Follow"] = time.time() + self.add_time( self.follow_delay / 2 ) return log_string = ( f"Trying to follow: {self.media_by_tag[0]["node"]["owner"]["id"]}" ) self.write_log(log_string) self.next_iteration["Follow"] = time.time() + self.add_time( self.follow_delay ) if ( self.follow( user_id=self.media_by_tag[0]["node"]["owner"]["id"], username=username, ) is not False ): self.bot_follow_list.append( [self.media_by_tag[0]["node"]["owner"]["id"], time.time()] ) self.next_iteration["Follow"] = time.time() + self.add_time( self.follow_delay ) def populate_from_feed(self): self.get_media_id_recent_feed() try: for mediafeed_user in self.media_on_feed: feed_username = mediafeed_user["node"]["owner"]["username"] feed_user_id = mediafeed_user["node"]["owner"]["id"] # print(check_if_userid_exists(self, userid=feed_user_id)) if check_if_userid_exists(self, userid=feed_user_id) is False: insert_username(self, user_id=feed_user_id, username=feed_username) self.write_log(f"Inserted user {feed_username} from recent feed") except: self.write_log("Notice: could not populate from recent feed") def new_auto_mod_unfollow(self): if time.time() > self.next_iteration["Unfollow"] and self.unfollow_per_day != 0: if (time.time() - self.bot_start_ts) < 30: # let bot initialize return if get_username_row_count(self) < 20: self.write_log( f" >>>Waiting for database to populate before unfollowing (progress {str(get_username_row_count(self))} /20)" ) if self.unfollow_recent_feed is True: self.write_log("Will try to populate using recent feed") self.populate_from_feed() self.next_iteration["Unfollow"] = time.time() + ( self.add_time(self.unfollow_delay) / 2 ) return # DB doesn't have enough followers yet if self.bot_mode == 0 or self.bot_mode == 3: try: if ( time.time() > self.next_iteration["Populate"] and self.unfollow_recent_feed is True ): self.populate_from_feed() 
self.next_iteration["Populate"] = time.time() + ( self.add_time(360) ) except: self.write_log( "Notice: Could not populate from recent feed right now" ) log_string = f"Trying to unfollow #{self.unfollow_counter + 1}:" self.write_log(log_string) self.auto_unfollow() self.next_iteration["Unfollow"] = time.time() + self.add_time( self.unfollow_delay ) if self.bot_mode == 1: unfollow_protocol(self) def new_auto_mod_comments(self): if ( time.time() > self.next_iteration["Comments"] and self.comments_per_day != 0 and len(self.media_by_tag) > 0 and self.check_exisiting_comment(self.media_by_tag[0]["node"]["shortcode"]) is False ): comment_text = self.generate_comment() log_string = f"Trying to comment: {self.media_by_tag[0]["node"]["id"]}" self.write_log(log_string) if ( self.comment(self.media_by_tag[0]["node"]["id"], comment_text) is not False ): self.next_iteration["Comments"] = time.time() + self.add_time( self.comments_delay ) def add_time(self, time): """ Make some random for next iteration""" return time * 0.9 + time * 0.2 * random.random() def generate_comment(self): c_list = list(itertools.product(*self.comment_list)) repl = [(" ", " "), (" .", "."), (" !", "!")] res = " ".join(random.choice(c_list)) for s, r in repl: res = res.replace(s, r) return res.capitalize() def check_exisiting_comment(self, media_code): url_check = self.url_media % (media_code) try: check_comment = self.s.get(url_check) if check_comment.status_code == 200: if "dialog-404" in check_comment.text: self.write_log( f"Tried to comment {media_code} but it doesn't exist (404). Resuming..." 
) del self.media_by_tag[0] return True all_data = json.loads( re.search( "window._sharedData = (.*?);", check_comment.text, re.DOTALL ).group(1) )["entry_data"]["PostPage"][ 0 ] # window._sharedData = (.*?); if ( all_data["graphql"]["shortcode_media"]["owner"]["id"] == self.user_id ): self.write_log("Keep calm - It's your own media ;)") # Del media to don't loop on it del self.media_by_tag[0] return True try: comment_list = list( all_data["graphql"]["shortcode_media"]["edge_media_to_comment"][ "edges" ] ) except: comment_list = list( all_data["graphql"]["shortcode_media"][ "edge_media_to_parent_comment" ]["edges"] ) for d in comment_list: if d["node"]["owner"]["id"] == self.user_id: self.write_log("Keep calm - Media already commented ;)") # Del media to don't loop on it del self.media_by_tag[0] return True return False elif check_comment.status_code == 404: insert_media( self, self.media_by_tag[0]["node"]["id"], str(check_comment.status_code), ) self.write_log( f"Tried to comment {media_code} but it doesn't exist (404). Resuming..." ) del self.media_by_tag[0] return True else: insert_media( self, self.media_by_tag[0]["node"]["id"], str(check_comment.status_code), ) self.media_by_tag.remove(self.media_by_tag[0]) return True except: self.write_log("Couldn't comment post, resuming.") del self.media_by_tag[0] return True def auto_unfollow(self): checking = True while checking: username_row = get_username_to_unfollow_random(self) if not username_row: self.write_log("Looks like there is nobody to unfollow.") return False current_id = username_row[0] current_user = username_row[1] unfollow_count = username_row[2] if not current_user: current_user = self.get_username_by_user_id(user_id=current_id) if not current_user: log_string = "api limit reached from instagram. 
Will try later" self.write_log(log_string) return False for wluser in self.unfollow_whitelist: if wluser == current_user: log_string = "found whitelist user, starting search again" self.write_log(log_string) break else: checking = False if self.login_status: log_string = f"Getting user info : {current_user}" self.write_log(log_string) if self.login_status == 1: url_tag = self.url_user_detail % (current_user) try: r = self.s.get(url_tag) if ( r.text.find( "The link you followed may be broken, or the page may have been removed." ) != -1 ): log_string = ( f"Looks like account was deleted, skipping : {current_user}" ) self.write_log(log_string) insert_unfollow_count(self, user_id=current_id) time.sleep(3) return False all_data = json.loads( re.search( "window._sharedData = (.*?);</script>", r.text, re.DOTALL ).group(1) )["entry_data"]["ProfilePage"][0] user_info = all_data["graphql"]["user"] i = 0 log_string = "Checking user info.." self.write_log(log_string) follows = user_info["edge_follow"]["count"] follower = user_info["edge_followed_by"]["count"] media = user_info["edge_owner_to_timeline_media"]["count"] follow_viewer = user_info["follows_viewer"] followed_by_viewer = user_info["followed_by_viewer"] requested_by_viewer = user_info["requested_by_viewer"] has_requested_viewer = user_info["has_requested_viewer"] log_string = f"Follower : {follower}" self.write_log(log_string) log_string = f"Following : {follows}" self.write_log(log_string) log_string = f"Media : {media}" self.write_log(log_string) if follows == 0 or follower / follows > 2: self.is_selebgram = True self.is_fake_account = False self.write_log(" >>>This is probably Selebgram account") elif follower == 0 or follows / follower > 2: self.is_fake_account = True self.is_selebgram = False self.write_log(" >>>This is probably Fake account") else: self.is_selebgram = False self.is_fake_account = False self.write_log(" >>>This is a normal account") if media > 0 and follows / media < 25 and follower / media < 25: 
                        # (tail of auto_unfollow: classification of the fetched
                        # profile continues here; the enclosing try/if headers
                        # are above this chunk)
                        self.is_active_user = True
                        self.write_log(" >>>This user is active")
                    else:
                        self.is_active_user = False
                        self.write_log(" >>>This user is passive")
                    # "Follows you" if they follow us or have a pending request to.
                    if follow_viewer or has_requested_viewer:
                        self.is_follower = True
                        self.write_log(" >>>This account is following you")
                    else:
                        self.is_follower = False
                        self.write_log(" >>>This account is NOT following you")
                    # "You follow them" if we follow or have requested to follow.
                    if followed_by_viewer or requested_by_viewer:
                        self.is_following = True
                        self.write_log(" >>>You are following this account")
                    else:
                        self.is_following = False
                        self.write_log(" >>>You are NOT following this account")
                except:
                    logging.exception("Except on auto_unfollow!")
                    time.sleep(3)
                    return False
            else:
                return False
        # Unfollow unless the account is a normal, active follower.
        if (
            self.is_selebgram is not False
            or self.is_fake_account is not False
            or self.is_active_user is not True
            or self.is_follower is not True
        ):
            self.write_log(current_user)
            self.unfollow(current_id)
            # don't insert unfollow count as it is done now inside unfollow()
            # insert_unfollow_count(self, user_id=current_id)
        elif self.is_following is not True:
            # we are not following this account, hence we unfollowed it, let's keep track
            insert_unfollow_count(self, user_id=current_id)

    def unfollow_recent_feed(self):
        """Pick a random media owner from the recent feed and unfollow them
        if they fail the selebgram/fake/active/follower checks.

        Returns False on fetch/parse failure; None otherwise.
        """
        if len(self.media_on_feed) == 0:
            self.get_media_id_recent_feed()
        if (
            len(self.media_on_feed) != 0
            and self.is_follower_number < 5
            and time.time() > self.next_iteration["Unfollow"]
            and self.unfollow_per_day != 0
        ):
            # Refresh the feed, then pick one media at random.
            self.get_media_id_recent_feed()
            chooser = random.randint(0, len(self.media_on_feed) - 1)
            self.current_user = self.media_on_feed[chooser]["node"]["owner"]["username"]
            self.current_id = self.media_on_feed[chooser]["node"]["owner"]["id"]
            current_user = self.current_user
            current_id = self.current_id
            if self.login_status:
                log_string = f"Getting user info : {current_user}"
                self.write_log(log_string)
                # NOTE(review): login_status is used both as a truthy flag and
                # compared to 1 — presumably always 1 when logged in; confirm.
                if self.login_status == 1:
                    url_tag = self.url_user_detail % (current_user)
                    try:
                        r = self.s.get(url_tag)
                        # Instagram's "page removed" message means the account
                        # is gone: record the unfollow and bail out.
                        if (
                            r.text.find(
                                "The link you followed may be broken, or the page may have been removed."
                            )
                            != -1
                        ):
                            log_string = (
                                f"Looks like account was deleted, skipping : {current_user}"
                            )
                            self.write_log(log_string)
                            insert_unfollow_count(self, user_id=current_id)
                            time.sleep(3)
                            return False
                        # Profile data is embedded in the page as a JS blob.
                        all_data = json.loads(
                            re.search(
                                "window._sharedData = (.*?);</script>",
                                r.text,
                                re.DOTALL,
                            ).group(1)
                        )["entry_data"]["ProfilePage"][0]
                        user_info = all_data["graphql"]["user"]
                        i = 0
                        log_string = "Checking user info.."
                        self.write_log(log_string)
                        follows = user_info["edge_follow"]["count"]
                        follower = user_info["edge_followed_by"]["count"]
                        media = user_info["edge_owner_to_timeline_media"]["count"]
                        follow_viewer = user_info["follows_viewer"]
                        followed_by_viewer = user_info["followed_by_viewer"]
                        requested_by_viewer = user_info["requested_by_viewer"]
                        has_requested_viewer = user_info["has_requested_viewer"]
                        log_string = f"Follower : {follower}"
                        self.write_log(log_string)
                        log_string = f"Following : {follows}"
                        self.write_log(log_string)
                        log_string = f"Media : {media}"
                        self.write_log(log_string)
                        # Heuristic classification (same thresholds as
                        # auto_unfollow): follower/following ratio > 2 in
                        # either direction marks celebrity or fake accounts.
                        if follows == 0 or follower / follows > 2:
                            self.is_selebgram = True
                            self.is_fake_account = False
                            self.write_log(" >>>This is probably Selebgram account")
                        elif follower == 0 or follows / follower > 2:
                            self.is_fake_account = True
                            self.is_selebgram = False
                            self.write_log(" >>>This is probably Fake account")
                        else:
                            self.is_selebgram = False
                            self.is_fake_account = False
                            self.write_log(" >>>This is a normal account")
                        # "Active" = posts regularly relative to graph size.
                        if media > 0 and follows / media < 25 and follower / media < 25:
                            self.is_active_user = True
                            self.write_log(" >>>This user is active")
                        else:
                            self.is_active_user = False
                            self.write_log(" >>>This user is passive")
                        if follow_viewer or has_requested_viewer:
                            self.is_follower = True
                            self.write_log(" >>>This account is following you")
                        else:
                            self.is_follower = False
                            self.write_log(" >>>This account is NOT following you")
                        if followed_by_viewer or requested_by_viewer:
                            self.is_following = True
                            self.write_log(" >>>You are following this account")
                        else:
                            self.is_following = False
                            self.write_log(" >>>You are NOT following this account")
                    except:
                        logging.exception("Except on auto_unfollow!")
                        time.sleep(3)
                        return False
                else:
                    return False
            # Unfollow unless the account is a normal, active follower.
            if (
                self.is_selebgram is not False
                or self.is_fake_account is not False
                or self.is_active_user is not True
                or self.is_follower is not True
            ):
                self.write_log(current_user)
                self.unfollow(current_id)
                self.next_iteration["Unfollow"] = time.time() + self.add_time(
                    self.unfollow_delay
                )
                # don't insert unfollow count as it is done now inside unfollow()
                # insert_unfollow_count(self, user_id=current_id)
            elif self.is_following is not True:
                # we are not following this account, hence we unfollowed it, let's keep track
                insert_unfollow_count(self, user_id=current_id)
            time.sleep(8)

    def get_media_id_recent_feed(self):
        """Populate self.media_on_feed from the logged-in home feed.

        On any failure the feed list is reset to [] and 20 s of backoff
        is applied. Returns 0 in all login_status==1 paths.
        """
        if self.login_status:
            now_time = datetime.datetime.now()
            log_string = f"{self.user_login} : Get media id on recent feed"
            self.write_log(log_string)
            if self.login_status == 1:
                url_tag = "https://www.instagram.com/"
                try:
                    r = self.s.get(url_tag)
                    # Feed payload is embedded in an additionalDataLoaded()
                    # JS call on the home page.
                    # NOTE(review): non-raw pattern relies on "\(" passing
                    # through the string literal unchanged — consider r"...".
                    jsondata = re.search(
                        "additionalDataLoaded\('feed',({.*})\);", r.text
                    ).group(1)
                    all_data = json.loads(jsondata.strip())
                    self.media_on_feed = list(
                        all_data["user"]["edge_web_feed_timeline"]["edges"]
                    )
                    log_string = f"Media in recent feed = {len(self.media_on_feed)}"
                    self.write_log(log_string)
                except:
                    logging.exception("get_media_id_recent_feed")
                    self.media_on_feed = []
                    time.sleep(20)
                return 0
            else:
                return 0

    def write_log(self, log_text):
        """ Write log by print() or logger """
        if self.log_mod == 0:
            # Console logging.
            try:
                now_time = datetime.datetime.now()
                # NOTE(review): double quotes nested inside an f-string are
                # only valid on Python >= 3.12 — confirm target version.
                print(f"{now_time.strftime("%d.%m.%Y_%H:%M")} {log_text}")
            except UnicodeEncodeError:
                print("Your text has unicode problem!")
        elif self.log_mod == 1:
            # Create log_file if not exist.
            if self.log_file == 0:
                # First call in file mode: lazily configure a per-user
                # FileHandler; log_file acts as a "configured" flag.
                self.log_file = 1
                now_time = datetime.datetime.now()
                self.log_full_path = "%s%s_%s.log" % (
                    self.log_file_path,
                    self.user_login,
                    now_time.strftime("%d.%m.%Y_%H:%M"),
                )
                formatter = logging.Formatter(
                    "%(asctime)s - %(name)s " "- %(message)s"
                )
                self.logger = logging.getLogger(self.user_login)
                self.hdrl = logging.FileHandler(self.log_full_path, mode="w")
                self.hdrl.setFormatter(formatter)
                self.logger.setLevel(level=logging.INFO)
                self.logger.addHandler(self.hdrl)
            # Log to log file.
            try:
                self.logger.info(log_text)
            except UnicodeEncodeError:
                print("Your text has unicode problem!")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function

from .sql_updates import check_and_insert_user_agent
from .sql_updates import get_username_random, get_username_to_unfollow_random
from .sql_updates import (
    get_usernames_first,
    get_usernames,
    get_username_row_count,
    check_if_userid_exists,
)
from .sql_updates import insert_media, insert_username, insert_unfollow_count
from .sql_updates import check_already_followed, check_already_unfollowed
from .sql_updates import check_and_update, check_already_liked
import re
import time
import sqlite3
import signal
import random
import logging
import json
import itertools
import datetime
import atexit
from .userinfo import UserInfo
from .unfollow_protocol import unfollow_protocol
import importlib
import os
import sys
import pickle

# Shown indirectly when f-string syntax fails to parse on old interpreters.
# (FIX: dropped the pointless f-prefix — the string has no placeholders.)
python_version_test = "If you are reading this error, you are not running Python 3.6 or greater. Check 'python --version' or 'python3 --version'."

# Required Dependencies and Modules, offer to install them automatically
# Keep fake_useragent last, quirk for pythonanywhere
required_modules = ["requests", "instaloader", "threading", "fake_useragent"]
for modname in required_modules:
    try:
        # try to import the module normally and put it in globals
        globals()[modname] = importlib.import_module(modname)
    except ImportError as e:
        # FIX: was `modname is not "fake_useragent"` — identity comparison
        # with a str literal (SyntaxWarning, interning-dependent); use `!=`.
        # A missing fake_useragent is tolerated (a fallback UA is used later);
        # any other missing module is fatal.
        if modname != "fake_useragent":
            print(
                f"Cannot continue without module {modname} Please install dependencies in requirements.txt. Exiting."
            )
            quit()


class InstaBot:
    """
    Instagram bot v 1.2.2
    like_per_day=1000 - How many likes set bot in one day.
    media_max_like=0 - Don't like media (photo or video) if it have more than media_max_like likes.
    media_min_like=0 - Don't like media (photo or video) if it have less than media_min_like likes.
    tag_list = ['cat', 'car', 'dog'] - Tag list to like.
    max_like_for_one_tag=5 - Like 1 to max_like_for_one_tag times by row.
    log_mod = 0 - Log mod: log_mod = 0 log to console, log_mod = 1 log to file, log_mod = 2 no log.
    https://github.com/LevPasha/instabot.py
    """

    # Persistence handles (set up in __init__).
    database_name = None
    session_file = None
    follows_db = None
    follows_db_c = None

    # Instagram web endpoints; %s slots are filled per request.
    url = "https://www.instagram.com/"
    url_tag = "https://www.instagram.com/explore/tags/%s/?__a=1"
    url_location = "https://www.instagram.com/explore/locations/%s/?__a=1"
    url_likes = "https://www.instagram.com/web/likes/%s/like/"
    url_unlike = "https://www.instagram.com/web/likes/%s/unlike/"
    url_comment = "https://www.instagram.com/web/comments/%s/add/"
    url_follow = "https://www.instagram.com/web/friendships/%s/follow/"
    url_unfollow = "https://www.instagram.com/web/friendships/%s/unfollow/"
    url_login = "https://www.instagram.com/accounts/login/ajax/"
    url_logout = "https://www.instagram.com/accounts/logout/"
    url_media_detail = "https://www.instagram.com/p/%s/?__a=1"
    url_media = "https://www.instagram.com/p/%s/"
    url_user_detail = "https://www.instagram.com/%s/"
    api_user_detail = "https://i.instagram.com/api/v1/users/%s/info/"
    instabot_repo_update = (
        "https://github.com/instabot-py/instabot.py/raw/master/version.txt"
    )

    # FIX: was `"" ""` (implicit concatenation of two empty literals) —
    # same value, written plainly. Replaced with a real UA in __init__.
    user_agent = ""
    accept_language = "en-US,en;q=0.5"

    # If instagram ban you - query return 400 error.
    error_400 = 0
    # If you have 3 400 error in row - looks like you banned.
    error_400_to_ban = 3
    # If InstaBot think you are banned - going to sleep.
    ban_sleep_time = 2 * 60 * 60

    # All counter.
    bot_mode = 0
    like_counter = 0
    follow_counter = 0
    unfollow_counter = 0
    comments_counter = 0
    current_user = "hajka"
    current_index = 0
    current_id = "abcds"
    # List of user_id, that bot follow
    # NOTE(review): these mutable class attributes are shared across
    # instances unless reassigned in __init__ — confirm single-instance use.
    bot_follow_list = []
    user_info_list = []
    user_list = []
    ex_user_list = []
    unwanted_username_list = []
    is_checked = False
    is_selebgram = False
    is_fake_account = False
    is_active_user = False
    is_following = False
    is_follower = False
    is_rejected = False
    is_self_checking = False
    is_by_tag = False
    is_follower_number = 0
    self_following = 0
    self_follower = 0
    # Log setting.
logging.basicConfig(filename="errors.log", level=logging.INFO) log_file_path = "" log_file = 0 # Other. user_id = 0 media_by_tag = 0 media_on_feed = [] media_by_user = [] login_status = False by_location = False # Running Times start_at_h = 0 start_at_m = 0 end_at_h = 23 end_at_m = 59 # For new_auto_mod next_iteration = { "Like": 0, "Follow": 0, "Unfollow": 0, "Comments": 0, "Populate": 0, } prog_run = True def __init__( self, login, password, like_per_day=1000, media_max_like=150, media_min_like=0, user_max_follow=0, user_min_follow=0, follow_per_day=0, follow_time=5 * 60 * 60, # Cannot be zero follow_time_enabled=True, unfollow_per_day=0, unfollow_recent_feed=True, start_at_h=0, start_at_m=0, end_at_h=23, end_at_m=59, database_name=None, session_file=None, comment_list=[ ["this", "the", "your"], ["photo", "picture", "pic", "shot", "snapshot"], ["is", "looks", "feels", "is really"], [ "great", "super", "good", "very good", "good", "wow", "WOW", "cool", "GREAT", "magnificent", "magical", "very cool", "stylish", "beautiful", "so beautiful", "so stylish", "so professional", "lovely", "so lovely", "very lovely", "glorious", "so glorious", "very glorious", "adorable", "excellent", "amazing", ], [".", "..", "...", "!", "!!", "!!!"], ], comments_per_day=0, tag_list=["cat", "car", "dog"], max_like_for_one_tag=5, unfollow_break_min=15, unfollow_break_max=30, log_mod=0, proxy="", user_blacklist={}, tag_blacklist=[], unwanted_username_list=[], unfollow_whitelist=[], ): self.session_file = session_file if database_name is not None: self.database_name = database_name else: self.database_name = f"{login.lower()}.db" self.follows_db = sqlite3.connect( self.database_name, timeout=0, isolation_level=None ) self.follows_db_c = self.follows_db.cursor() check_and_update(self) list_of_ua = [ "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FSL 7.0.6.01001)", "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FSL 7.0.7.01001)", "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; 
FSL 7.0.5.01003)", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0", "Mozilla/5.0 (X11; U; Linux x86_64; de; rv:1.9.2.8) Gecko/20100723 Ubuntu/10.04 (lucid) Firefox/3.6.8", "Mozilla/5.0 (Windows NT 5.1; rv:13.0) Gecko/20100101 Firefox/13.0.1", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:11.0) Gecko/20100101 Firefox/11.0", "Mozilla/5.0 (X11; U; Linux x86_64; de; rv:1.9.2.8) Gecko/20100723 Ubuntu/10.04 (lucid) Firefox/3.6.8", "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; .NET CLR 1.0.3705)", "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)", "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)", "Opera/9.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.01", "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727)", "Mozilla/5.0 (Windows NT 5.1; rv:5.0.1) Gecko/20100101 Firefox/5.0.1", "Mozilla/5.0 (Windows NT 6.1; rv:5.0) Gecko/20100101 Firefox/5.02", "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.112 Safari/535.1", "Mozilla/4.0 (compatible; MSIE 6.0; MSIE 5.5; Windows NT 5.0) Opera 7.02 Bork-edition [en]", ] try: fallback = random.sample(list_of_ua, 1) fake_ua = fake_useragent.UserAgent(fallback=fallback[0]) self.user_agent = check_and_insert_user_agent(self, str(fake_ua)) except: fake_ua = random.sample(list_of_ua, 1) self.user_agent = check_and_insert_user_agent(self, str(fake_ua[0])) self.bot_start = datetime.datetime.now() self.bot_start_ts = time.time() self.start_at_h = start_at_h self.start_at_m = start_at_m self.end_at_h = end_at_h self.end_at_m = end_at_m self.unfollow_break_min = unfollow_break_min self.unfollow_break_max = unfollow_break_max self.user_blacklist = user_blacklist self.tag_blacklist = tag_blacklist self.unfollow_whitelist = unfollow_whitelist self.comment_list = comment_list self.instaloader = instaloader.Instaloader() self.unfollow_recent_feed = 
unfollow_recent_feed self.time_in_day = 24 * 60 * 60 # Like self.like_per_day = like_per_day if self.like_per_day != 0: self.like_delay = self.time_in_day / self.like_per_day # Follow self.follow_time = follow_time # Cannot be zero self.follow_time_enabled = follow_time_enabled self.follow_per_day = follow_per_day if self.follow_per_day != 0: self.follow_delay = self.time_in_day / self.follow_per_day # Unfollow self.unfollow_per_day = unfollow_per_day if self.unfollow_per_day != 0: self.unfollow_delay = self.time_in_day / self.unfollow_per_day # Comment self.comments_per_day = comments_per_day if self.comments_per_day != 0: self.comments_delay = self.time_in_day / self.comments_per_day # Don't like if media have more than n likes. self.media_max_like = media_max_like # Don't like if media have less than n likes. self.media_min_like = media_min_like # Don't follow if user have more than n followers. self.user_max_follow = user_max_follow # Don't follow if user have less than n followers. self.user_min_follow = user_min_follow # Auto mod seting: # Default list of tag. self.tag_list = tag_list # Get random tag, from tag_list, and like (1 to n) times. 
self.max_like_for_one_tag = max_like_for_one_tag # log_mod 0 to console, 1 to file self.log_mod = log_mod self.s = requests.Session() self.c = requests.Session() # if you need proxy make something like this: # self.s.proxies = {"https" : "http://proxyip:proxyport"} # by @ageorgios if proxy != "": proxies = {"http": f"http://{proxy}", "https": f"http://{proxy}"} self.s.proxies.update(proxies) self.c.proxies.update(proxies) # convert login to lower self.user_login = login.lower() self.user_password = password self.bot_mode = 0 self.media_by_tag = [] self.media_on_feed = [] self.media_by_user = [] self.current_user_info = "" self.unwanted_username_list = unwanted_username_list now_time = datetime.datetime.now() self.check_for_bot_update() log_string = "Instabot v1.2.2/0 started at %s:" % ( now_time.strftime("%d.%m.%Y %H:%M") ) self.write_log(log_string) self.login() self.populate_user_blacklist() signal.signal(signal.SIGINT, self.cleanup) signal.signal(signal.SIGTERM, self.cleanup) atexit.register(self.cleanup) self.instaload = instaloader.Instaloader() def check_for_bot_update(self): self.write_log("Checking for updates...") try: # CHANGE THIS TO OFFICIAL REPO IF KEPT updated_timestamp = self.c.get(self.instabot_repo_update) current_version_timestamp = open("version.txt", "r") if int(updated_timestamp.text) > int(current_version_timestamp.read()): self.write_log( ">>> UPDATE AVAILABLE <<< Please update Instabot. You are running an older version." 
) else: self.write_log("You are running the latest stable version") except: self.write_log("Could not check for updates") def populate_user_blacklist(self): for user in self.user_blacklist: user_id_url = self.url_user_detail % (user) info = self.s.get(user_id_url) # prevent error if 'Account of user was deleted or link is invalid from json import JSONDecodeError try: all_data = json.loads(info.text) except JSONDecodeError as e: self.write_log( f"Account of user {user} was deleted or link is " "invalid" ) else: # prevent exception if user have no media id_user = all_data["user"]["id"] # Update the user_name with the user_id self.user_blacklist[user] = id_user log_string = f"Blacklisted user {user} added with ID: {id_user}" self.write_log(log_string) time.sleep(5 * random.random()) def login(self): successfulLogin = False self.s.headers.update( { "Accept": "*/*", "Accept-Language": self.accept_language, "Accept-Encoding": "gzip, deflate, br", "Connection": "keep-alive", "Host": "www.instagram.com", "Origin": "https://www.instagram.com", "Referer": "https://www.instagram.com/", "User-Agent": self.user_agent, "X-Instagram-AJAX": "1", "Content-Type": "application/x-www-form-urlencoded", "X-Requested-With": "XMLHttpRequest", } ) if self.session_file is not None and os.path.isfile(self.session_file): self.write_log(f"Found session file {self.session_file}") successfulLogin = True with open(self.session_file, "rb") as i: cookies = pickle.load(i) self.s.cookies.update(cookies) else: self.write_log("Trying to login as {}...".format(self.user_login)) self.login_post = { "username": self.user_login, "password": self.user_password, } r = self.s.get(self.url) csrf_token = re.search('(?<="csrf_token":")\w+', r.text).group(0) self.s.headers.update({"X-CSRFToken": csrf_token}) time.sleep(5 * random.random()) login = self.s.post( self.url_login, data=self.login_post, allow_redirects=True ) if ( login.status_code != 200 and login.status_code != 400 ): # Handling Other Status Codes 
and making debug easier!! self.write_log("Request didn't return 200 as status code!") self.write_log("Here is more info for debbugin or creating an issue") print("=" * 15) print("Response Status: ", login.status_code) print("=" * 15) print("Response Content:\n", login.text) print("=" * 15) print("Response Header:\n", login.headers) print("=" * 15) return loginResponse = login.json() try: self.csrftoken = login.cookies["csrftoken"] self.s.headers.update({"X-CSRFToken": login.cookies["csrftoken"]}) except Exception as e: self.write_log("Something wrong with login") self.write_log(login.text) if loginResponse.get("errors"): self.write_log( "Something is wrong with Instagram! Please try again later..." ) for error in loginResponse["errors"]["error"]: self.write_log(f"Error =>{error}") return if loginResponse.get("message") == "checkpoint_required": try: if "instagram.com" in loginResponse["checkpoint_url"]: challenge_url = loginResponse["checkpoint_url"] else: challenge_url = ( f"https://instagram.com{loginResponse['checkpoint_url']}" ) self.write_log(f"Challenge required at {challenge_url}") with self.s as clg: clg.headers.update( { "Accept": "*/*", "Accept-Language": self.accept_language, "Accept-Encoding": "gzip, deflate, br", "Connection": "keep-alive", "Host": "www.instagram.com", "Origin": "https://www.instagram.com", "User-Agent": self.user_agent, "X-Instagram-AJAX": "1", "Content-Type": "application/x-www-form-urlencoded", "x-requested-with": "XMLHttpRequest", } ) # Get challenge page challenge_request_explore = clg.get(challenge_url) # Get CSRF Token from challenge page challenge_csrf_token = re.search( '(?<="csrf_token":")\w+', challenge_request_explore.text ).group(0) # Get Rollout Hash from challenge page rollout_hash = re.search( '(?<="rollout_hash":")\w+', challenge_request_explore.text ).group(0) # Ask for option 1 from challenge, which is usually Email or Phone challenge_post = {"choice": 1} # Update headers for challenge submit page 
clg.headers.update({"X-CSRFToken": challenge_csrf_token}) clg.headers.update({"Referer": challenge_url}) # Request instagram to send a code challenge_request_code = clg.post( challenge_url, data=challenge_post, allow_redirects=True ) # User should receive a code soon, ask for it challenge_userinput_code = input( "Challenge Required.\n\nEnter the code sent to your mail/phone: " ) challenge_security_post = { "security_code": int(challenge_userinput_code) } complete_challenge = clg.post( challenge_url, data=challenge_security_post, allow_redirects=True, ) if complete_challenge.status_code != 200: self.write_log("Entered code is wrong, Try again later!") return self.csrftoken = complete_challenge.cookies["csrftoken"] self.s.headers.update( {"X-CSRFToken": self.csrftoken, "X-Instagram-AJAX": "1"} ) successfulLogin = complete_challenge.status_code == 200 except Exception as err: print(f"Login failed, response: \n\n{login.text} {err}") quit() elif loginResponse.get("authenticated") is False: self.write_log("Login error! 
Check your login data!") return else: rollout_hash = re.search('(?<="rollout_hash":")\w+', r.text).group(0) self.s.headers.update({"X-Instagram-AJAX": rollout_hash}) successfulLogin = True # ig_vw=1536; ig_pr=1.25; ig_vh=772; ig_or=landscape-primary; self.s.cookies["csrftoken"] = self.csrftoken self.s.cookies["ig_vw"] = "1536" self.s.cookies["ig_pr"] = "1.25" self.s.cookies["ig_vh"] = "772" self.s.cookies["ig_or"] = "landscape-primary" time.sleep(5 * random.random()) if successfulLogin: r = self.s.get("https://www.instagram.com/") self.csrftoken = re.search('(?<="csrf_token":")\w+', r.text).group(0) self.s.cookies["csrftoken"] = self.csrftoken self.s.headers.update({"X-CSRFToken": self.csrftoken}) finder = r.text.find(self.user_login) if finder != -1: ui = UserInfo() self.user_id = ui.get_user_id_by_login(self.user_login) self.login_status = True log_string = f"{self.user_login} login success!\n" self.write_log(log_string) if self.session_file is not None: self.write_log( f"Saving cookies to session file {self.session_file}" ) with open(self.session_file, "wb") as output: pickle.dump(self.s.cookies, output, pickle.HIGHEST_PROTOCOL) else: self.login_status = False self.write_log("Login error! Check your login data!") if self.session_file is not None and os.path.isfile(self.session_file): try: os.remove(self.session_file) except: self.write_log( "Could not delete session file. Please delete manually" ) self.prog_run = False else: self.write_log("Login error! Connection error!") def logout(self): now_time = datetime.datetime.now() log_string = ( "Logout: likes - %i, follow - %i, unfollow - %i, comments - %i." 
% ( self.like_counter, self.follow_counter, self.unfollow_counter, self.comments_counter, ) ) self.write_log(log_string) work_time = datetime.datetime.now() - self.bot_start log_string = "Bot work time: %s" % (work_time) self.write_log(log_string) try: logout_post = {"csrfmiddlewaretoken": self.csrftoken} logout = self.s.post(self.url_logout, data=logout_post) self.write_log("Logout success!") self.login_status = False except: logging.exception("Logout error!") def cleanup(self, *_): # Unfollow all bot follow if self.follow_counter >= self.unfollow_counter: for i in range(len(self.bot_follow_list)): f = self.bot_follow_list[0] if check_already_unfollowed(self, f[0]): log_string = "Already unfollowed before, skipping: %s" % (f[0]) self.write_log(log_string) else: log_string = "Trying to unfollow: %s" % (f[0]) self.write_log(log_string) self.unfollow_on_cleanup(f[0]) sleeptime = random.randint( self.unfollow_break_min, self.unfollow_break_max ) log_string = "Pausing for %i seconds... %i of %i" % ( sleeptime, self.unfollow_counter, self.follow_counter, ) self.write_log(log_string) time.sleep(sleeptime) self.bot_follow_list.remove(f) # Logout if self.login_status and self.session_file is None: self.logout() self.prog_run = False def get_media_id_by_tag(self, tag): """ Get media ID set, by your hashtag or location """ if self.login_status: if tag.startswith("l:"): tag = tag.replace("l:", "") self.by_location = True log_string = "Get Media by location: %s" % (tag) self.write_log(log_string) if self.login_status == 1: url_location = self.url_location % (tag) try: r = self.s.get(url_location) all_data = json.loads(r.text) self.media_by_tag = list( all_data["graphql"]["location"]["edge_location_to_media"][ "edges" ] ) except: self.media_by_tag = [] self.write_log("Except on get_media!") logging.exception("get_media_id_by_tag") else: return 0 else: log_string = "Get Media by tag: %s" % (tag) self.by_location = False self.write_log(log_string) if self.login_status == 1: 
url_tag = self.url_tag % (tag) try: r = self.s.get(url_tag) all_data = json.loads(r.text) self.media_by_tag = list( all_data["graphql"]["hashtag"]["edge_hashtag_to_media"][ "edges" ] ) except: self.media_by_tag = [] self.write_log("Except on get_media!") logging.exception("get_media_id_by_tag") else: return 0 def get_instagram_url_from_media_id(self, media_id, url_flag=True, only_code=None): """ Get Media Code or Full Url from Media ID Thanks to Nikished """ media_id = int(media_id) if url_flag is False: return "" else: alphabet = ( "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_" ) shortened_id = "" while media_id > 0: media_id, idx = divmod(media_id, 64) shortened_id = alphabet[idx] + shortened_id if only_code: return shortened_id else: return f"instagram.com/p/{shortened_id}/" def get_username_by_media_id(self, media_id): """ Get username by media ID Thanks to Nikished """ if self.login_status: if self.login_status == 1: media_id_url = self.get_instagram_url_from_media_id( int(media_id), only_code=True ) url_media = self.url_media_detail % (media_id_url) try: r = self.s.get(url_media) all_data = json.loads(r.text) username = str( all_data["graphql"]["shortcode_media"]["owner"]["username"] ) self.write_log( "media_id=" + media_id + ", media_id_url=" + media_id_url + ", username_by_media_id=" + username ) return username except: logging.exception("username_by_mediaid exception") return False else: return "" def get_username_by_user_id(self, user_id): if self.login_status: try: profile = instaloader.Profile.from_id(self.instaload.context, user_id) username = profile.username return username except: logging.exception("Except on get_username_by_user_id") return False else: return False def get_userinfo_by_name(self, username): """ Get user info by name """ if self.login_status: if self.login_status == 1: url_info = self.url_user_detail % (username) try: r = self.s.get(url_info) all_data = json.loads(r.text) user_info = all_data["user"] follows = 
user_info["follows"]["count"] follower = user_info["followed_by"]["count"] follow_viewer = user_info["follows_viewer"] if follower > 3000 or follows > 1500: self.write_log( " >>>This is probably Selebgram, Business or Fake account" ) if follow_viewer: return None return user_info except: logging.exception("Except on get_userinfo_by_name") return False else: return False def like_all_exist_media(self, media_size=-1, delay=True): """ Like all media ID that have self.media_by_tag """ if self.login_status: if self.media_by_tag != 0: i = 0 for d in self.media_by_tag: # Media count by this tag. if media_size > 0 or media_size < 0: media_size -= 1 l_c = self.media_by_tag[i]["node"]["edge_liked_by"]["count"] if ( (l_c <= self.media_max_like and l_c >= self.media_min_like) or (self.media_max_like == 0 and l_c >= self.media_min_like) or (self.media_min_like == 0 and l_c <= self.media_max_like) or (self.media_min_like == 0 and self.media_max_like == 0) ): for ( blacklisted_user_name, blacklisted_user_id, ) in self.user_blacklist.items(): if ( self.media_by_tag[i]["node"]["owner"]["id"] == blacklisted_user_id ): self.write_log( f"Not liking media owned by blacklisted user: {blacklisted_user_name}" ) return False if ( self.media_by_tag[i]["node"]["owner"]["id"] == self.user_id ): self.write_log("Keep calm - It's your own media ;)") return False if ( check_already_liked( self, media_id=self.media_by_tag[i]["node"]["id"] ) == 1 ): self.write_log("Keep calm - It's already liked ;)") return False try: if ( len( self.media_by_tag[i]["node"][ "edge_media_to_caption" ]["edges"] ) > 1 ): caption = self.media_by_tag[i]["node"][ "edge_media_to_caption" ]["edges"][0]["node"]["text"].encode( "ascii", errors="ignore" ) tag_blacklist = set(self.tag_blacklist) if sys.version_info[0] == 3: tags = { str.lower((tag.decode("ASCII")).strip("#")) for tag in caption.split() if (tag.decode("ASCII")).startswith("#") } else: tags = { unicode.lower( (tag.decode("ASCII")).strip("#") ) for tag in 
caption.split() if (tag.decode("ASCII")).startswith("#") } if tags.intersection(tag_blacklist): matching_tags = ", ".join( tags.intersection(tag_blacklist) ) self.write_log( f"Not liking media with blacklisted tag(s): {matching_tags}" ) return False except: logging.exception("Except on like_all_exist_media") return False log_string = "Trying to like media: %s" % ( self.media_by_tag[i]["node"]["id"] ) self.write_log(log_string) like = self.like(self.media_by_tag[i]["node"]["id"]) # comment = self.comment(self.media_by_tag[i]['id'], 'Cool!') # follow = self.follow(self.media_by_tag[i]["owner"]["id"]) if like != 0: if like.status_code == 200: # Like, all ok! self.error_400 = 0 self.like_counter += 1 log_string = f"Liked: {self.media_by_tag[i]['node']['id']}. Like #{self.like_counter}." insert_media( self, media_id=self.media_by_tag[i]["node"]["id"], status="200", ) self.write_log(log_string) elif like.status_code == 400: log_string = f"Not liked: {like.status_code}" self.write_log(log_string) insert_media( self, media_id=self.media_by_tag[i]["node"]["id"], status="400", ) # Some error. If repeated - can be ban! if self.error_400 >= self.error_400_to_ban: # Look like you banned! time.sleep(self.ban_sleep_time) else: self.error_400 += 1 else: log_string = f"Not liked: {like.status_code}" insert_media( self, media_id=self.media_by_tag[i]["node"]["id"], status=str(like.status_code), ) self.write_log(log_string) return False # Some error. 
i += 1 if delay: time.sleep( self.like_delay * 0.9 + self.like_delay * 0.2 * random.random() ) else: return True else: return False else: return False else: return False else: self.write_log("No media to like!") def like(self, media_id): """ Send http request to like media by ID """ if self.login_status: url_likes = self.url_likes % (media_id) try: like = self.s.post(url_likes) last_liked_media_id = media_id except: logging.exception("Except on like!") like = 0 return like def unlike(self, media_id): """ Send http request to unlike media by ID """ if self.login_status: url_unlike = self.url_unlike % (media_id) try: unlike = self.s.post(url_unlike) except: logging.exception("Except on unlike!") unlike = 0 return unlike def comment(self, media_id, comment_text): """ Send http request to comment """ if self.login_status: comment_post = {"comment_text": comment_text} url_comment = self.url_comment % (media_id) try: comment = self.s.post(url_comment, data=comment_post) # time.sleep(30) if comment.status_code == 200: self.comments_counter += 1 log_string = f"Write: {comment_text}. #{self.comments_counter}." self.write_log(log_string) time.sleep(40) if comment.status_code == 403: time.sleep(150) return comment except: logging.excepmeon("Except on comment!") return False def follow(self, user_id, username=None): """ Send http request to follow """ if self.login_status: url_follow = self.url_follow % (user_id) if username is None: username = self.get_username_by_user_id(user_id=user_id) try: follow = self.s.post(url_follow) if follow.status_code == 200: self.follow_counter += 1 log_string = f"Followed: {user_id} #{self.follow_counter}." 
self.write_log(log_string) insert_username(self, user_id=user_id, username=username) return follow except: logging.exception("Except on follow!") return False def unfollow(self, user_id): """ Send http request to unfollow """ if self.login_status: url_unfollow = self.url_unfollow % (user_id) try: unfollow = self.s.post(url_unfollow) if unfollow.status_code == 200: self.unfollow_counter += 1 log_string = f"Unfollowed: {user_id} #{self.unfollow_counter}." self.write_log(log_string) insert_unfollow_count(self, user_id=user_id) return unfollow except: logging.exception("Exept on unfollow!") return False def unfollow_on_cleanup(self, user_id): """ Unfollow on cleanup by @rjmayott """ if self.login_status: url_unfollow = self.url_unfollow % (user_id) try: unfollow = self.s.post(url_unfollow) if unfollow.status_code == 200: self.unfollow_counter += 1 log_string = f"Unfollow: {user_id} #{self.unfollow_counter} of {self.follow_counter}." self.write_log(log_string) insert_unfollow_count(self, user_id=user_id) else: log_string = ( "Slow Down - Pausing for 5 minutes so we don't get banned!" ) self.write_log(log_string) time.sleep(300) unfollow = self.s.post(url_unfollow) if unfollow.status_code == 200: self.unfollow_counter += 1 log_string = f"Unfollow: {user_id} #{self.unfollow_counter} of {self.follow_counter}." self.write_log(log_string) insert_unfollow_count(self, user_id=user_id) else: log_string = "Still no good :( Skipping and pausing for another 5 minutes" self.write_log(log_string) time.sleep(300) return False return unfollow except: log_string = "Except on unfollow... Looks like a network error" logging.exception(log_string) return False def auto_mod(self): """ Star loop, that get media ID by your tag list, and like it """ if self.login_status: while self.prog_run: random.shuffle(self.tag_list) self.get_media_id_by_tag(random.choice(self.tag_list)) self.like_all_exist_media(random.randint(1, self.max_like_for_one_tag)) self.write_log("Exit Program... 
GoodBye") sys.exit(0) def new_auto_mod(self): while self.prog_run and self.login_status: now = datetime.datetime.now() if datetime.time( self.start_at_h, self.start_at_m ) <= now.time() and now.time() <= datetime.time( self.end_at_h, self.end_at_m ): # ------------------- Get media_id ------------------- if len(self.media_by_tag) == 0: self.get_media_id_by_tag(random.choice(self.tag_list)) self.this_tag_like_count = 0 self.max_tag_like_count = random.randint( 1, self.max_like_for_one_tag ) self.remove_already_liked() # ------------------- Like ------------------- self.new_auto_mod_like() # ------------------- Follow ------------------- self.new_auto_mod_follow() # ------------------- Unfollow ------------------- self.new_auto_mod_unfollow() # ------------------- Comment ------------------- self.new_auto_mod_comments() # Bot iteration in 1 sec time.sleep(3) # print("Tic!") else: print( "!!sleeping until {hour}:{min}".format( hour=self.start_at_h, min=self.start_at_m ), end="\r", ) time.sleep(100) self.write_log("Exit Program... 
GoodBye") sys.exit(0) def remove_already_liked(self): self.write_log("Removing already liked medias..") x = 0 while x < len(self.media_by_tag): if ( check_already_liked(self, media_id=self.media_by_tag[x]["node"]["id"]) == 1 ): self.media_by_tag.remove(self.media_by_tag[x]) else: x += 1 def new_auto_mod_like(self): if ( time.time() > self.next_iteration["Like"] and self.like_per_day != 0 and len(self.media_by_tag) > 0 ): # You have media_id to like: if self.like_all_exist_media(media_size=1, delay=False): # If like go to sleep: self.next_iteration["Like"] = time.time() + self.add_time( self.like_delay ) # Count this tag likes: self.this_tag_like_count += 1 if self.this_tag_like_count >= self.max_tag_like_count: self.media_by_tag = [0] # Del first media_id try: del self.media_by_tag[0] except: print("Could not remove media") def new_auto_mod_follow(self): username = None if time.time() < self.next_iteration["Follow"]: return if ( time.time() > self.next_iteration["Follow"] and self.follow_per_day != 0 and len(self.media_by_tag) > 0 ): if self.media_by_tag[0]["node"]["owner"]["id"] == self.user_id: self.write_log("Keep calm - It's your own profile ;)") return if self.user_min_follow != 0 or self.user_max_follow != 0: try: username = self.get_username_by_user_id( self.media_by_tag[0]["node"]["owner"]["id"] ) url = self.url_user_detail % (username) r = self.s.get(url) all_data = json.loads( re.search( "window._sharedData = (.*?);</script>", r.text, re.DOTALL ).group(1) ) followers = all_data["entry_data"]["ProfilePage"][0]["graphql"][ "user" ]["edge_followed_by"]["count"] if followers < self.user_min_follow: self.write_log( f"Won't follow {username}: does not meet user_min_follow requirement" ) return if self.user_max_follow != 0 and followers > self.user_max_follow: self.write_log( f"Won't follow {username}: does not meet user_max_follow requirement" ) return except Exception: pass if ( check_already_followed( self, user_id=self.media_by_tag[0]["node"]["owner"]["id"] 
) == 1 ): self.write_log( f"Already followed before {self.media_by_tag[0]['node']['owner']['id']}" ) self.next_iteration["Follow"] = time.time() + self.add_time( self.follow_delay / 2 ) return log_string = ( f"Trying to follow: {self.media_by_tag[0]['node']['owner']['id']}" ) self.write_log(log_string) self.next_iteration["Follow"] = time.time() + self.add_time( self.follow_delay ) if ( self.follow( user_id=self.media_by_tag[0]["node"]["owner"]["id"], username=username, ) is not False ): self.bot_follow_list.append( [self.media_by_tag[0]["node"]["owner"]["id"], time.time()] ) self.next_iteration["Follow"] = time.time() + self.add_time( self.follow_delay ) def populate_from_feed(self): self.get_media_id_recent_feed() try: for mediafeed_user in self.media_on_feed: feed_username = mediafeed_user["node"]["owner"]["username"] feed_user_id = mediafeed_user["node"]["owner"]["id"] # print(check_if_userid_exists(self, userid=feed_user_id)) if check_if_userid_exists(self, userid=feed_user_id) is False: insert_username(self, user_id=feed_user_id, username=feed_username) self.write_log(f"Inserted user {feed_username} from recent feed") except: self.write_log("Notice: could not populate from recent feed") def new_auto_mod_unfollow(self): if time.time() > self.next_iteration["Unfollow"] and self.unfollow_per_day != 0: if (time.time() - self.bot_start_ts) < 30: # let bot initialize return if get_username_row_count(self) < 20: self.write_log( f" >>>Waiting for database to populate before unfollowing (progress {str(get_username_row_count(self))} /20)" ) if self.unfollow_recent_feed is True: self.write_log("Will try to populate using recent feed") self.populate_from_feed() self.next_iteration["Unfollow"] = time.time() + ( self.add_time(self.unfollow_delay) / 2 ) return # DB doesn't have enough followers yet if self.bot_mode == 0 or self.bot_mode == 3: try: if ( time.time() > self.next_iteration["Populate"] and self.unfollow_recent_feed is True ): self.populate_from_feed() 
self.next_iteration["Populate"] = time.time() + ( self.add_time(360) ) except: self.write_log( "Notice: Could not populate from recent feed right now" ) log_string = f"Trying to unfollow #{self.unfollow_counter + 1}:" self.write_log(log_string) self.auto_unfollow() self.next_iteration["Unfollow"] = time.time() + self.add_time( self.unfollow_delay ) if self.bot_mode == 1: unfollow_protocol(self) def new_auto_mod_comments(self): if ( time.time() > self.next_iteration["Comments"] and self.comments_per_day != 0 and len(self.media_by_tag) > 0 and self.check_exisiting_comment(self.media_by_tag[0]["node"]["shortcode"]) is False ): comment_text = self.generate_comment() log_string = f"Trying to comment: {self.media_by_tag[0]['node']['id']}" self.write_log(log_string) if ( self.comment(self.media_by_tag[0]["node"]["id"], comment_text) is not False ): self.next_iteration["Comments"] = time.time() + self.add_time( self.comments_delay ) def add_time(self, time): """ Make some random for next iteration""" return time * 0.9 + time * 0.2 * random.random() def generate_comment(self): c_list = list(itertools.product(*self.comment_list)) repl = [(" ", " "), (" .", "."), (" !", "!")] res = " ".join(random.choice(c_list)) for s, r in repl: res = res.replace(s, r) return res.capitalize() def check_exisiting_comment(self, media_code): url_check = self.url_media % (media_code) try: check_comment = self.s.get(url_check) if check_comment.status_code == 200: if "dialog-404" in check_comment.text: self.write_log( f"Tried to comment {media_code} but it doesn't exist (404). Resuming..." 
) del self.media_by_tag[0] return True all_data = json.loads( re.search( "window._sharedData = (.*?);", check_comment.text, re.DOTALL ).group(1) )["entry_data"]["PostPage"][ 0 ] # window._sharedData = (.*?); if ( all_data["graphql"]["shortcode_media"]["owner"]["id"] == self.user_id ): self.write_log("Keep calm - It's your own media ;)") # Del media to don't loop on it del self.media_by_tag[0] return True try: comment_list = list( all_data["graphql"]["shortcode_media"]["edge_media_to_comment"][ "edges" ] ) except: comment_list = list( all_data["graphql"]["shortcode_media"][ "edge_media_to_parent_comment" ]["edges"] ) for d in comment_list: if d["node"]["owner"]["id"] == self.user_id: self.write_log("Keep calm - Media already commented ;)") # Del media to don't loop on it del self.media_by_tag[0] return True return False elif check_comment.status_code == 404: insert_media( self, self.media_by_tag[0]["node"]["id"], str(check_comment.status_code), ) self.write_log( f"Tried to comment {media_code} but it doesn't exist (404). Resuming..." ) del self.media_by_tag[0] return True else: insert_media( self, self.media_by_tag[0]["node"]["id"], str(check_comment.status_code), ) self.media_by_tag.remove(self.media_by_tag[0]) return True except: self.write_log("Couldn't comment post, resuming.") del self.media_by_tag[0] return True def auto_unfollow(self): checking = True while checking: username_row = get_username_to_unfollow_random(self) if not username_row: self.write_log("Looks like there is nobody to unfollow.") return False current_id = username_row[0] current_user = username_row[1] unfollow_count = username_row[2] if not current_user: current_user = self.get_username_by_user_id(user_id=current_id) if not current_user: log_string = "api limit reached from instagram. 
Will try later" self.write_log(log_string) return False for wluser in self.unfollow_whitelist: if wluser == current_user: log_string = "found whitelist user, starting search again" self.write_log(log_string) break else: checking = False if self.login_status: log_string = f"Getting user info : {current_user}" self.write_log(log_string) if self.login_status == 1: url_tag = self.url_user_detail % (current_user) try: r = self.s.get(url_tag) if ( r.text.find( "The link you followed may be broken, or the page may have been removed." ) != -1 ): log_string = ( f"Looks like account was deleted, skipping : {current_user}" ) self.write_log(log_string) insert_unfollow_count(self, user_id=current_id) time.sleep(3) return False all_data = json.loads( re.search( "window._sharedData = (.*?);</script>", r.text, re.DOTALL ).group(1) )["entry_data"]["ProfilePage"][0] user_info = all_data["graphql"]["user"] i = 0 log_string = "Checking user info.." self.write_log(log_string) follows = user_info["edge_follow"]["count"] follower = user_info["edge_followed_by"]["count"] media = user_info["edge_owner_to_timeline_media"]["count"] follow_viewer = user_info["follows_viewer"] followed_by_viewer = user_info["followed_by_viewer"] requested_by_viewer = user_info["requested_by_viewer"] has_requested_viewer = user_info["has_requested_viewer"] log_string = f"Follower : {follower}" self.write_log(log_string) log_string = f"Following : {follows}" self.write_log(log_string) log_string = f"Media : {media}" self.write_log(log_string) if follows == 0 or follower / follows > 2: self.is_selebgram = True self.is_fake_account = False self.write_log(" >>>This is probably Selebgram account") elif follower == 0 or follows / follower > 2: self.is_fake_account = True self.is_selebgram = False self.write_log(" >>>This is probably Fake account") else: self.is_selebgram = False self.is_fake_account = False self.write_log(" >>>This is a normal account") if media > 0 and follows / media < 25 and follower / media < 25: 
self.is_active_user = True self.write_log(" >>>This user is active") else: self.is_active_user = False self.write_log(" >>>This user is passive") if follow_viewer or has_requested_viewer: self.is_follower = True self.write_log(" >>>This account is following you") else: self.is_follower = False self.write_log(" >>>This account is NOT following you") if followed_by_viewer or requested_by_viewer: self.is_following = True self.write_log(" >>>You are following this account") else: self.is_following = False self.write_log(" >>>You are NOT following this account") except: logging.exception("Except on auto_unfollow!") time.sleep(3) return False else: return False if ( self.is_selebgram is not False or self.is_fake_account is not False or self.is_active_user is not True or self.is_follower is not True ): self.write_log(current_user) self.unfollow(current_id) # don't insert unfollow count as it is done now inside unfollow() # insert_unfollow_count(self, user_id=current_id) elif self.is_following is not True: # we are not following this account, hence we unfollowed it, let's keep track insert_unfollow_count(self, user_id=current_id) def unfollow_recent_feed(self): if len(self.media_on_feed) == 0: self.get_media_id_recent_feed() if ( len(self.media_on_feed) != 0 and self.is_follower_number < 5 and time.time() > self.next_iteration["Unfollow"] and self.unfollow_per_day != 0 ): self.get_media_id_recent_feed() chooser = random.randint(0, len(self.media_on_feed) - 1) self.current_user = self.media_on_feed[chooser]["node"]["owner"]["username"] self.current_id = self.media_on_feed[chooser]["node"]["owner"]["id"] current_user = self.current_user current_id = self.current_id if self.login_status: log_string = f"Getting user info : {current_user}" self.write_log(log_string) if self.login_status == 1: url_tag = self.url_user_detail % (current_user) try: r = self.s.get(url_tag) if ( r.text.find( "The link you followed may be broken, or the page may have been removed." 
) != -1 ): log_string = ( f"Looks like account was deleted, skipping : {current_user}" ) self.write_log(log_string) insert_unfollow_count(self, user_id=current_id) time.sleep(3) return False all_data = json.loads( re.search( "window._sharedData = (.*?);</script>", r.text, re.DOTALL ).group(1) )["entry_data"]["ProfilePage"][0] user_info = all_data["graphql"]["user"] i = 0 log_string = "Checking user info.." self.write_log(log_string) follows = user_info["edge_follow"]["count"] follower = user_info["edge_followed_by"]["count"] media = user_info["edge_owner_to_timeline_media"]["count"] follow_viewer = user_info["follows_viewer"] followed_by_viewer = user_info["followed_by_viewer"] requested_by_viewer = user_info["requested_by_viewer"] has_requested_viewer = user_info["has_requested_viewer"] log_string = f"Follower : {follower}" self.write_log(log_string) log_string = f"Following : {follows}" self.write_log(log_string) log_string = f"Media : {media}" self.write_log(log_string) if follows == 0 or follower / follows > 2: self.is_selebgram = True self.is_fake_account = False self.write_log(" >>>This is probably Selebgram account") elif follower == 0 or follows / follower > 2: self.is_fake_account = True self.is_selebgram = False self.write_log(" >>>This is probably Fake account") else: self.is_selebgram = False self.is_fake_account = False self.write_log(" >>>This is a normal account") if media > 0 and follows / media < 25 and follower / media < 25: self.is_active_user = True self.write_log(" >>>This user is active") else: self.is_active_user = False self.write_log(" >>>This user is passive") if follow_viewer or has_requested_viewer: self.is_follower = True self.write_log(" >>>This account is following you") else: self.is_follower = False self.write_log(" >>>This account is NOT following you") if followed_by_viewer or requested_by_viewer: self.is_following = True self.write_log(" >>>You are following this account") else: self.is_following = False self.write_log(" >>>You 
are NOT following this account") except: logging.exception("Except on auto_unfollow!") time.sleep(3) return False else: return False if ( self.is_selebgram is not False or self.is_fake_account is not False or self.is_active_user is not True or self.is_follower is not True ): self.write_log(current_user) self.unfollow(current_id) self.next_iteration["Unfollow"] = time.time() + self.add_time( self.unfollow_delay ) # don't insert unfollow count as it is done now inside unfollow() # insert_unfollow_count(self, user_id=current_id) elif self.is_following is not True: # we are not following this account, hence we unfollowed it, let's keep track insert_unfollow_count(self, user_id=current_id) time.sleep(8) def get_media_id_recent_feed(self): if self.login_status: now_time = datetime.datetime.now() log_string = f"{self.user_login} : Get media id on recent feed" self.write_log(log_string) if self.login_status == 1: url_tag = "https://www.instagram.com/" try: r = self.s.get(url_tag) jsondata = re.search( "additionalDataLoaded\('feed',({.*})\);", r.text ).group(1) all_data = json.loads(jsondata.strip()) self.media_on_feed = list( all_data["user"]["edge_web_feed_timeline"]["edges"] ) log_string = f"Media in recent feed = {len(self.media_on_feed)}" self.write_log(log_string) except: logging.exception("get_media_id_recent_feed") self.media_on_feed = [] time.sleep(20) return 0 else: return 0 def write_log(self, log_text): """ Write log by print() or logger """ if self.log_mod == 0: try: now_time = datetime.datetime.now() print(f"{now_time.strftime('%d.%m.%Y_%H:%M')} {log_text}") except UnicodeEncodeError: print("Your text has unicode problem!") elif self.log_mod == 1: # Create log_file if not exist. 
if self.log_file == 0: self.log_file = 1 now_time = datetime.datetime.now() self.log_full_path = "%s%s_%s.log" % ( self.log_file_path, self.user_login, now_time.strftime("%d.%m.%Y_%H:%M"), ) formatter = logging.Formatter("%(asctime)s - %(name)s " "- %(message)s") self.logger = logging.getLogger(self.user_login) self.hdrl = logging.FileHandler(self.log_full_path, mode="w") self.hdrl.setFormatter(formatter) self.logger.setLevel(level=logging.INFO) self.logger.addHandler(self.hdrl) # Log to log file. try: self.logger.info(log_text) except UnicodeEncodeError: print("Your text has unicode problem!")
import discord
from discord.ext import commands
from discord.ext.commands.core import has_permissions

from Tools.utils import getConfig, updateConfig


class AntiSpamCog(commands.Cog, name="Anti Spamming"):
    """
    Enable or Disable Anti-Spam Property of Garuda.
    When enabled, Garuda will warn or kick member if they try to spam in chat.
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name="antispam", usage="<true|false>")
    @has_permissions(administrator=True)
    async def antispam(self, ctx, antispam):
        """Enable or Disable Anti-Spam Property of Garuda.

        Accepts the literal strings "true" or "false"; any other input
        reports the legal values and the current state without changing
        the stored config.
        """
        # Per-guild config dict; assumes it carries an 'antiSpam' key
        # (TODO confirm getConfig always seeds that key).
        data = getConfig(ctx.guild.id)

        if antispam == "true":
            data['antiSpam'] = True
            antispam_enable_embed = discord.Embed(
                title="**ANTI-SPAM ENABLED**",
                description="🦅: Anti-Spam Has Been Enabled 👁‍🗨",
                color=0x00FF00  # Green Color
            )
            # Persist before announcing, so the reply reflects saved state.
            updateConfig(ctx.guild.id, data)
            await ctx.channel.send(embed=antispam_enable_embed)
        elif antispam == "false":
            data['antiSpam'] = False
            antispam_disable_embed = discord.Embed(
                title="**ANTI-SPAM DISABLED**",
                description="🦅: Anti-Spam Has Been Disabled ❌",
                color=0xFF0000  # Red Color
            )
            updateConfig(ctx.guild.id, data)
            await ctx.channel.send(embed=antispam_disable_embed)
        else:
            # BUGFIX: the original nested double quotes inside a
            # double-quoted f-string ({"Enabled" if data["antiSpam"] ...}),
            # which is a SyntaxError on Python < 3.12 (PEP 701). Use
            # single quotes inside, matching the sibling copy of this cog.
            antispam_wrong_input_embed = discord.Embed(
                title="**illegal Input Provided**",
                description=f"🦅: illegal Input Provided.\nLegal values: `true`, `false`\nCurrently Anti-Spam is `{'Enabled' if data['antiSpam'] else 'Disabled'}`",
                color=0xFFA500  # Orange
            )
            await ctx.channel.send(embed=antispam_wrong_input_embed)


# Setup Bot
def setup(bot):
    bot.add_cog(AntiSpamCog(bot))
import discord
from discord.ext import commands
from discord.ext.commands.core import has_permissions

from Tools.utils import getConfig, updateConfig


class AntiSpamCog(commands.Cog, name="Anti Spamming"):
    """
    Enable or Disable Anti-Spam Property of Garuda.
    When enabled, Garuda will warn or kick member if they try to spam in chat.
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name="antispam", usage="<true|false>")
    @has_permissions(administrator=True)
    async def antispam(self, ctx, antispam):
        """Enable or Disable Anti-Spam Property of Garuda."""
        # Load this guild's stored configuration.
        guild_config = getConfig(ctx.guild.id)

        if antispam not in ("true", "false"):
            # Unrecognised argument: report legal values plus current state,
            # leaving the stored config untouched.
            current_state = 'Enabled' if guild_config['antiSpam'] else 'Disabled'
            feedback = discord.Embed(
                title="**illegal Input Provided**",
                description=(
                    "🦅: illegal Input Provided.\n"
                    "Legal values: `true`, `false`\n"
                    f"Currently Anti-Spam is `{current_state}`"
                ),
                color=0xFFA500,  # Orange
            )
            await ctx.channel.send(embed=feedback)
            return

        enabling = antispam == "true"
        guild_config['antiSpam'] = enabling
        if enabling:
            feedback = discord.Embed(
                title="**ANTI-SPAM ENABLED**",
                description="🦅: Anti-Spam Has Been Enabled 👁‍🗨",
                color=0x00FF00,  # Green Color
            )
        else:
            feedback = discord.Embed(
                title="**ANTI-SPAM DISABLED**",
                description="🦅: Anti-Spam Has Been Disabled ❌",
                color=0xFF0000,  # Red Color
            )
        # Persist the new state, then confirm it in channel.
        updateConfig(ctx.guild.id, guild_config)
        await ctx.channel.send(embed=feedback)


# Setup Bot
def setup(bot):
    bot.add_cog(AntiSpamCog(bot))
from dataclasses import asdict, dataclass import asyncio import base64 import binascii import chevron import hashlib import json import logging import re from aiohttp import ClientConnectionError, ClientSession, ClientTimeout from arq import Retry, cron from arq.utils import to_unix_ms from arq.worker import run_worker as arq_run_worker from asyncio import TimeoutError from buildpg import MultipleValues, Values, asyncpg from chevron import ChevronError from datetime import datetime, timezone from enum import Enum from itertools import chain from pathlib import Path from phonenumbers import ( NumberParseException, PhoneNumberFormat, PhoneNumberType, format_number, is_valid_number, number_type, parse as parse_number, ) from phonenumbers.geocoder import country_name_for_number, description_for_number from pydantic.datetime_parse import parse_datetime from typing import Dict, List, Optional from ua_parser.user_agent_parser import Parse as ParseUserAgent from .ext import ApiError, Mandrill, MessageBird from .models import ( THIS_DIR, AttachmentModel, BaseWebhook, EmailRecipientModel, EmailSendMethod, EmailSendModel, MandrillWebhook, MessageStatus, SendMethod, SmsRecipientModel, SmsSendMethod, SmsSendModel, ) from .render import EmailInfo, render_email from .render.main import MessageDef, MessageTooLong, SmsLength, apply_short_links, sms_length from .settings import Settings test_logger = logging.getLogger('morpheus.worker.test') main_logger = logging.getLogger('morpheus.worker') MOBILE_NUMBER_TYPES = PhoneNumberType.MOBILE, PhoneNumberType.FIXED_LINE_OR_MOBILE ONE_DAY = 86400 ONE_YEAR = ONE_DAY * 365 STYLES_SASS = (THIS_DIR / 'extra' / 'default-styles.scss').read_text() worker_functions = [] def worker_function(f): worker_functions.append(f) return f class MessageBirdExternalError(Exception): pass @dataclass class EmailJob: group_id: int group_uuid: str send_method: str first_name: str last_name: str user_link: int address: str tags: List[str] pdf_attachments: 
List[dict] attachments: List[dict] main_template: str mustache_partials: Dict[str, dict] macros: Dict[str, dict] subject_template: str company_code: str from_email: str from_name: str subaccount: str important: bool context: dict headers: dict @dataclass class SmsJob: group_id: str group_uuid: str send_method: str first_name: str last_name: str user_link: int number: str tags: List[str] main_template: str company_code: str country_code: str from_name: str context: dict @dataclass class Number: number: str country_code: str number_formatted: str descr: str is_mobile: bool @dataclass class SmsData: number: Number message: str shortened_link: dict length: SmsLength class UpdateStatus(str, Enum): duplicate = 'duplicate' missing = 'missing' added = 'added' async def startup(ctx): settings = ctx.get('settings') or Settings() ctx.update( settings=settings, email_click_url=f'https://{settings.click_host_name}/l', sms_click_url=f'{settings.click_host_name}/l', pg=ctx.get('pg') or await asyncpg.create_pool_b(dsn=settings.pg_dsn, min_size=2), session=ClientSession(timeout=ClientTimeout(total=30)), mandrill=Mandrill(settings=settings), messagebird=MessageBird(settings=settings), ) async def shutdown(ctx): await asyncio.gather(ctx['session'].close(), ctx['pg'].close(), ctx['mandrill'].close(), ctx['messagebird'].close()) email_retrying = [5, 10, 60, 600, 1800, 3600, 12 * 3600] @worker_function async def send_email(ctx, group_id: int, company_id: int, recipient: EmailRecipientModel, m: EmailSendModel): s = SendEmail(ctx, group_id, company_id, recipient, m) return await s.run() class SendEmail: __slots__ = 'ctx', 'settings', 'recipient', 'group_id', 'company_id', 'm', 'tags' def __init__(self, ctx: dict, group_id: int, company_id: int, recipient: EmailRecipientModel, m: EmailSendModel): self.ctx = ctx self.settings: Settings = ctx['settings'] self.group_id = group_id self.company_id = company_id self.recipient: EmailRecipientModel = recipient self.m: EmailSendModel = m self.tags 
= list(set(self.recipient.tags + self.m.tags + [str(self.m.uid)])) async def run(self): main_logger.info('Sending email to %s via %s', self.recipient.address, self.m.method) if self.ctx['job_try'] > len(email_retrying): main_logger.error('%s: tried to send email %d times, all failed', self.group_id, self.ctx['job_try']) await self._store_email_failed(MessageStatus.send_request_failed, 'upstream error') return context = dict(self.m.context, **self.recipient.context) if 'styles__sass' not in context and re.search(r'\{\{\{ *styles *\}\}\}', self.m.main_template): context['styles__sass'] = STYLES_SASS headers = dict(self.m.headers, **self.recipient.headers) email_info = await self._render_email(context, headers) if not email_info: return attachments = [a async for a in self._generate_base64_pdf(self.recipient.pdf_attachments)] attachments += [a async for a in self._generate_base64(self.recipient.attachments)] if self.m.method == EmailSendMethod.email_mandrill: if self.recipient.address.endswith('@example.com'): _id = re.sub(r'[^a-zA-Z0-9\-]', '', f'mandrill-{self.recipient.address}') await self._store_email(_id, utcnow(), email_info) else: await self._send_mandrill(email_info, attachments) elif self.m.method == EmailSendMethod.email_test: await self._send_test_email(email_info, attachments) else: raise NotImplementedError() async def _send_mandrill(self, email_info: EmailInfo, attachments: List[dict]): data = { 'async': True, 'message': dict( html=email_info.html_body, subject=email_info.subject, from_email=self.m.from_address.email, from_name=self.m.from_address.name, to=[dict(email=self.recipient.address, name=email_info.full_name, type='to')], headers=email_info.headers, track_opens=True, track_clicks=False, auto_text=True, view_content_link=False, signing_domain=self.m.from_address.email[self.m.from_address.email.index('@') + 1 :], subaccount=self.m.subaccount, tags=self.tags, inline_css=True, important=self.m.important, attachments=attachments, ), } send_ts = 
utcnow() job_try = self.ctx['job_try'] defer = email_retrying[job_try - 1] try: response = await self.ctx['mandrill'].post('messages/send.json', **data) except (ClientConnectionError, TimeoutError) as e: main_logger.info('client connection error group_id=%s job_try=%s defer=%ss', self.group_id, job_try, defer) raise Retry(defer=defer) from e except ApiError as e: if e.status in {502, 504} or (e.status == 500 and '<center>nginx/' in e.body): main_logger.info( 'temporary mandrill error group_id=%s status=%s job_try=%s defer=%ss', self.group_id, e.status, job_try, defer, ) raise Retry(defer=defer) from e else: # if the status is not 502 or 504, or 500 from nginx then raise raise data = await response.json() assert len(data) == 1, data data = data[0] assert data['email'] == self.recipient.address, data await self._store_email(data['_id'], send_ts, email_info) async def _send_test_email(self, email_info: EmailInfo, attachments: List[dict]): data = dict( from_email=self.m.from_address.email, from_name=self.m.from_address.name, group_uuid=str(self.m.uid), headers=email_info.headers, to_address=self.recipient.address, to_name=email_info.full_name, to_user_link=self.recipient.user_link, tags=self.tags, important=self.m.important, attachments=[ f'{a['name']}:{base64.b64decode(a['content']).decode(errors='ignore'):.40}' for a in attachments ], ) msg_id = re.sub(r'[^a-zA-Z0-9\-]', '', f'{self.m.uid}-{self.recipient.address}') send_ts = utcnow() output = ( f'to: {self.recipient.address}\n' f'msg id: {msg_id}\n' f'ts: {send_ts}\n' f'subject: {email_info.subject}\n' f'data: {json.dumps(data, indent=2)}\n' f'content:\n' f'{email_info.html_body}\n' ) if self.settings.test_output: # pragma: no branch Path.mkdir(self.settings.test_output, parents=True, exist_ok=True) save_path = self.settings.test_output / f'{msg_id}.txt' test_logger.info('sending message: %s (saved to %s)', output, save_path) save_path.write_text(output) await self._store_email(msg_id, send_ts, email_info) async def 
_render_email(self, context, headers) -> Optional[EmailInfo]: m = MessageDef( first_name=self.recipient.first_name, last_name=self.recipient.last_name, main_template=self.m.main_template, mustache_partials=self.m.mustache_partials, macros=self.m.macros, subject_template=self.m.subject_template, context=context, headers=headers, ) try: return render_email(m, self.ctx['email_click_url']) except ChevronError as e: await self._store_email_failed(MessageStatus.render_failed, f'Error rendering email: {e}') async def _generate_base64_pdf(self, pdf_attachments): headers = dict(pdf_page_size='A4', pdf_zoom='1.25', pdf_margin_left='8mm', pdf_margin_right='8mm') for a in pdf_attachments: async with self.ctx['session'].get(self.settings.pdf_generation_url, data=a.html, headers=headers) as r: if r.status == 200: pdf_content = await r.read() yield dict(type='application/pdf', name=a.name, content=base64.b64encode(pdf_content).decode()) else: data = await r.text() main_logger.warning('error generating pdf %s, data: %s', r.status, data) async def _generate_base64(self, attachments: List[AttachmentModel]): for attachment in attachments: try: # Check to see if content can be decoded from base64 base64.b64decode(attachment.content, validate=True) except binascii.Error: # Content has not yet been base64 encoded so needs to be encoded content = base64.b64encode(attachment.content).decode() else: # Content has already been base64 encoded so just pass content through content = attachment.content.decode() yield dict(name=attachment.name, type=attachment.mime_type, content=content) async def _store_email(self, external_id, send_ts, email_info: EmailInfo): data = dict( external_id=external_id, group_id=self.group_id, company_id=self.company_id, method=self.m.method, send_ts=send_ts, status=MessageStatus.send, to_first_name=self.recipient.first_name, to_last_name=self.recipient.last_name, to_user_link=self.recipient.user_link, to_address=self.recipient.address, tags=self.tags, 
subject=email_info.subject, body=email_info.html_body, ) attachments = [ f'{getattr(a, 'id', None) or ''}::{a.name}' for a in chain(self.recipient.pdf_attachments, self.recipient.attachments) ] if attachments: data['attachments'] = attachments message_id = await self.ctx['pg'].fetchval_b( 'insert into messages (:values__names) values :values returning id', values=Values(**data) ) if email_info.shortened_link: await self.ctx['pg'].execute_b( 'insert into links (:values__names) values :values', values=MultipleValues( *[Values(message_id=message_id, token=token, url=url) for url, token in email_info.shortened_link] ), ) async def _store_email_failed(self, status: MessageStatus, error_msg): await self.ctx['pg'].execute_b( 'insert into messages (:values__names) values :values', values=Values( group_id=self.group_id, company_id=self.company_id, method=self.m.method, status=status, to_first_name=self.recipient.first_name, to_last_name=self.recipient.last_name, to_user_link=self.recipient.user_link, to_address=self.recipient.address, tags=self.tags, body=error_msg, ), ) @worker_function async def send_sms(ctx, group_id: int, company_id: int, recipient: SmsRecipientModel, m: SmsSendModel): s = SendSMS(ctx, group_id, company_id, recipient, m) return await s.run() class SendSMS: __slots__ = 'ctx', 'settings', 'recipient', 'group_id', 'company_id', 'm', 'tags', 'messagebird', 'from_name' def __init__(self, ctx: dict, group_id: int, company_id: int, recipient: SmsRecipientModel, m: SmsSendModel): self.ctx = ctx self.settings: Settings = ctx['settings'] self.group_id = group_id self.company_id = company_id self.recipient: SmsRecipientModel = recipient self.m: SmsSendModel = m self.tags = list(set(self.recipient.tags + self.m.tags + [str(self.m.uid)])) self.messagebird: MessageBird = ctx['messagebird'] self.from_name = self.m.from_name if self.m.country_code != 'US' else self.settings.us_send_number async def run(self): sms_data = await self._sms_prep() if not sms_data: return if 
self.m.method == SmsSendMethod.sms_test: await self._test_send_sms(sms_data) elif self.m.method == SmsSendMethod.sms_messagebird: await self._messagebird_send_sms(sms_data) else: raise NotImplementedError() async def _sms_prep(self) -> Optional[SmsData]: number_info = validate_number(self.recipient.number, self.m.country_code, include_description=False) msg, error, shortened_link, msg_length = None, None, None, None if not number_info or not number_info.is_mobile: error = f'invalid mobile number "{self.recipient.number}"' main_logger.warning( 'invalid mobile number "%s" for "%s", not sending', self.recipient.number, self.m.company_code ) else: context = dict(self.m.context, **self.recipient.context) shortened_link = apply_short_links(context, self.ctx['sms_click_url'], 12) try: msg = chevron.render(self.m.main_template, data=context) except ChevronError as e: error = f'Error rendering SMS: {e}' else: try: msg_length = sms_length(msg) except MessageTooLong as e: error = str(e) if error: await self.ctx['pg'].execute_b( 'insert into messages (:values__names) values :values', values=Values( group_id=self.group_id, company_id=self.company_id, method=self.m.method, status=MessageStatus.render_failed, to_first_name=self.recipient.first_name, to_last_name=self.recipient.last_name, to_user_link=self.recipient.user_link, to_address=number_info.number_formatted if number_info else self.recipient.number, tags=self.tags, body=error, ), ) else: return SmsData(number=number_info, message=msg, shortened_link=shortened_link, length=msg_length) async def _test_send_sms(self, sms_data: SmsData): # remove the + from the beginning of the number msg_id = f'{self.m.uid}-{sms_data.number.number[1:]}' send_ts = utcnow() cost = 0.012 * sms_data.length.parts output = ( f'to: {sms_data.number}\n' f'msg id: {msg_id}\n' f'ts: {send_ts}\n' f'group_id: {self.group_id}\n' f'tags: {self.tags}\n' f'company_code: {self.m.company_code}\n' f'from_name: {self.from_name}\n' f'cost: {cost}\n' f'length: 
{sms_data.length}\n' f'message:\n' f'{sms_data.message}\n' ) if self.settings.test_output: # pragma: no branch Path.mkdir(self.settings.test_output, parents=True, exist_ok=True) save_path = self.settings.test_output / f'{msg_id}.txt' test_logger.info('sending message: %s (saved to %s)', output, save_path) save_path.write_text(output) await self._store_sms(msg_id, send_ts, sms_data, cost) async def _messagebird_get_mcc_cost(self, redis, mcc): rates_key = 'messagebird-rates' if not await redis.exists(rates_key): # get fresh data on rates by mcc main_logger.info('getting fresh pricing data from messagebird...') r = await self.messagebird.get('pricing/sms/outbound') if r.status != 200: response = await r.text() main_logger.error('error getting messagebird api', extra={'status': r.status, 'response': response}) raise MessageBirdExternalError((r.status, response)) data = await r.json() prices = data['prices'] if not next((1 for g in prices if g['mcc'] == '0'), None): main_logger.error('no default messagebird pricing with mcc "0"', extra={'prices': prices}) prices = {g['mcc']: f'{float(g['price']):0.5f}' for g in prices} await asyncio.gather(redis.hmset_dict(rates_key, prices), redis.expire(rates_key, ONE_DAY)) rate = await redis.hget(rates_key, mcc, encoding='utf8') if not rate: main_logger.warning('no rate found for mcc: "%s", using default', mcc) rate = await redis.hget(rates_key, '0', encoding='utf8') assert rate, f'no rate found for mcc: {mcc}' return float(rate) async def _messagebird_get_number_cost(self, number: Number): cc_mcc_key = f'messagebird-cc:{number.country_code}' with await self.ctx['redis'] as redis: mcc = await redis.get(cc_mcc_key) if mcc is None: main_logger.info('no mcc for %s, doing HLR lookup...', number.number) api_number = number.number.replace('+', '') await self.messagebird.post(f'lookup/{api_number}/hlr') network, hlr = None, None for i in range(30): r = await self.messagebird.get(f'lookup/{api_number}') data = await r.json() hlr = 
data.get('hlr') if not hlr: continue network = hlr.get('network') if not network: continue elif hlr['status'] == 'active': main_logger.info( 'found result for %s after %d attempts %s', number.number, i, json.dumps(data, indent=2) ) break await asyncio.sleep(1) if not hlr or not network: main_logger.warning('No HLR result found for %s after 30 attempts', number.number, extra=data) return mcc = str(network)[:3] await redis.setex(cc_mcc_key, ONE_YEAR, mcc) return await self._messagebird_get_mcc_cost(redis, mcc) async def _messagebird_send_sms(self, sms_data: SmsData): try: msg_cost = await self._messagebird_get_number_cost(sms_data.number) except MessageBirdExternalError: msg_cost = 0 # Set to SMS cost to 0 until cost API is working/changed if msg_cost is None: return cost = sms_data.length.parts * msg_cost send_ts = utcnow() main_logger.info( 'sending SMS to %s, parts: %d, cost: %0.2fp', sms_data.number.number, sms_data.length.parts, cost * 100 ) r = await self.messagebird.post( 'messages', originator=self.from_name, body=sms_data.message, recipients=[sms_data.number.number], datacoding='auto', reference='morpheus', # required to prompt status updates to occur allowed_statuses=201, ) data = await r.json() if data['recipients']['totalCount'] != 1: main_logger.error('not one recipients in send response', extra={'data': data}) await self._store_sms(data['id'], send_ts, sms_data, cost) async def _store_sms(self, external_id, send_ts, sms_data: SmsData, cost: float): async with self.ctx['pg'].acquire() as conn: message_id = await conn.fetchval_b( 'insert into messages (:values__names) values :values returning id', values=Values( external_id=external_id, group_id=self.group_id, company_id=self.company_id, method=self.m.method, send_ts=send_ts, status=MessageStatus.send, to_first_name=self.recipient.first_name, to_last_name=self.recipient.last_name, to_user_link=self.recipient.user_link, to_address=sms_data.number.number_formatted, tags=self.tags, body=sms_data.message, 
cost=cost, extra=json.dumps(asdict(sms_data.length)), ), ) if sms_data.shortened_link: await conn.execute_b( 'insert into links (:values__names) values :values', values=MultipleValues( *[Values(message_id=message_id, token=token, url=url) for url, token in sms_data.shortened_link] ), ) def validate_number(number, country, include_description=True) -> Optional[Number]: try: p = parse_number(number, country) except NumberParseException: return if not is_valid_number(p): return is_mobile = number_type(p) in MOBILE_NUMBER_TYPES descr = None if include_description: country = country_name_for_number(p, 'en') region = description_for_number(p, 'en') descr = country if country == region else f'{region}, {country}' return Number( number=format_number(p, PhoneNumberFormat.E164), country_code=f'{p.country_code}', number_formatted=format_number(p, PhoneNumberFormat.INTERNATIONAL), descr=descr, is_mobile=is_mobile, ) @worker_function async def update_mandrill_webhooks(ctx, events): mandrill_webhook = MandrillWebhook(events=events) statuses = {} for m in mandrill_webhook.events: status = await update_message_status(ctx, SendMethod.email_mandrill, m, log_each=False) if status in statuses: statuses[status] += 1 else: statuses[status] = 1 main_logger.info( 'updating %d messages: %s', len(mandrill_webhook.events), ' '.join(f'{k}={v}' for k, v in statuses.items()) ) return len(mandrill_webhook.events) @worker_function async def store_click(ctx, *, link_id, ip, ts, user_agent): cache_key = f'click-{link_id}-{ip}' with await ctx['redis'] as redis: v = await redis.incr(cache_key) if v > 1: return 'recently_clicked' await redis.expire(cache_key, 60) async with ctx['pg'].acquire() as conn: message_id, target = await conn.fetchrow('select message_id, url from links where id=$1', link_id) extra = {'target': target, 'ip': ip, 'user_agent': user_agent} if user_agent: ua_dict = ParseUserAgent(user_agent) platform = ua_dict['device']['family'] if platform in {'Other', None}: platform = 
ua_dict['os']['family'] extra['user_agent_display'] = ( ('{user_agent[family]} {user_agent[major]} on ' '{platform}') .format(platform=platform, **ua_dict) .strip(' ') ) ts = parse_datetime(ts) status = 'click' await conn.execute_b( 'insert into events (:values__names) values :values', values=Values(message_id=message_id, status=status, ts=ts, extra=json.dumps(extra)), ) @worker_function async def update_message_status(ctx, send_method: SendMethod, m: BaseWebhook, log_each=True) -> UpdateStatus: h = hashlib.md5(f'{m.message_id}-{to_unix_ms(m.ts)}-{m.status}-{m.extra_json(sort_keys=True)}'.encode()) ref = f'event-{h.hexdigest()}' with await ctx['redis'] as redis: v = await redis.incr(ref) if v > 1: log_each and main_logger.info( 'event already exists %s, ts: %s, ' 'status: %s. skipped', m.message_id, m.ts, m.status ) return UpdateStatus.duplicate await redis.expire(ref, 86400) async with ctx['pg'].acquire() as conn: message_id = await conn.fetchval( 'select id from messages where method = $1 and external_id = $2', send_method, m.message_id ) if not message_id: return UpdateStatus.missing if not m.ts.tzinfo: m.ts = m.ts.replace(tzinfo=timezone.utc) log_each and main_logger.info('adding event %s, ts: %s, status: %s', m.message_id, m.ts, m.status) await conn.execute_b( 'insert into events (:values__names) values :values', values=Values(message_id=message_id, status=m.status, ts=m.ts, extra=m.extra_json()), ) return UpdateStatus.added async def update_aggregation_view(ctx): await ctx['pg'].execute('refresh materialized view message_aggregation') def utcnow(): return datetime.utcnow().replace(tzinfo=timezone.utc) class WorkerSettings: max_jobs = 20 keep_result = 5 max_tries = len(email_retrying) + 1 # so we try all values in email_retrying functions = worker_functions on_startup = startup on_shutdown = shutdown cron_jobs = [cron(update_aggregation_view, minute=12, timeout=1800)] def run_worker(settings: Settings): # pragma: no cover arq_run_worker(WorkerSettings, 
redis_settings=settings.redis_settings, ctx={'settings': settings})
from dataclasses import asdict, dataclass import asyncio import base64 import binascii import chevron import hashlib import json import logging import re from aiohttp import ClientConnectionError, ClientSession, ClientTimeout from arq import Retry, cron from arq.utils import to_unix_ms from arq.worker import run_worker as arq_run_worker from asyncio import TimeoutError from buildpg import MultipleValues, Values, asyncpg from chevron import ChevronError from datetime import datetime, timezone from enum import Enum from itertools import chain from pathlib import Path from phonenumbers import ( NumberParseException, PhoneNumberFormat, PhoneNumberType, format_number, is_valid_number, number_type, parse as parse_number, ) from phonenumbers.geocoder import country_name_for_number, description_for_number from pydantic.datetime_parse import parse_datetime from typing import Dict, List, Optional from ua_parser.user_agent_parser import Parse as ParseUserAgent from .ext import ApiError, Mandrill, MessageBird from .models import ( THIS_DIR, AttachmentModel, BaseWebhook, EmailRecipientModel, EmailSendMethod, EmailSendModel, MandrillWebhook, MessageStatus, SendMethod, SmsRecipientModel, SmsSendMethod, SmsSendModel, ) from .render import EmailInfo, render_email from .render.main import MessageDef, MessageTooLong, SmsLength, apply_short_links, sms_length from .settings import Settings test_logger = logging.getLogger('morpheus.worker.test') main_logger = logging.getLogger('morpheus.worker') MOBILE_NUMBER_TYPES = PhoneNumberType.MOBILE, PhoneNumberType.FIXED_LINE_OR_MOBILE ONE_DAY = 86400 ONE_YEAR = ONE_DAY * 365 STYLES_SASS = (THIS_DIR / 'extra' / 'default-styles.scss').read_text() worker_functions = [] def worker_function(f): worker_functions.append(f) return f class MessageBirdExternalError(Exception): pass @dataclass class EmailJob: group_id: int group_uuid: str send_method: str first_name: str last_name: str user_link: int address: str tags: List[str] pdf_attachments: 
List[dict] attachments: List[dict] main_template: str mustache_partials: Dict[str, dict] macros: Dict[str, dict] subject_template: str company_code: str from_email: str from_name: str subaccount: str important: bool context: dict headers: dict @dataclass class SmsJob: group_id: str group_uuid: str send_method: str first_name: str last_name: str user_link: int number: str tags: List[str] main_template: str company_code: str country_code: str from_name: str context: dict @dataclass class Number: number: str country_code: str number_formatted: str descr: str is_mobile: bool @dataclass class SmsData: number: Number message: str shortened_link: dict length: SmsLength class UpdateStatus(str, Enum): duplicate = 'duplicate' missing = 'missing' added = 'added' async def startup(ctx): settings = ctx.get('settings') or Settings() ctx.update( settings=settings, email_click_url=f'https://{settings.click_host_name}/l', sms_click_url=f'{settings.click_host_name}/l', pg=ctx.get('pg') or await asyncpg.create_pool_b(dsn=settings.pg_dsn, min_size=2), session=ClientSession(timeout=ClientTimeout(total=30)), mandrill=Mandrill(settings=settings), messagebird=MessageBird(settings=settings), ) async def shutdown(ctx): await asyncio.gather(ctx['session'].close(), ctx['pg'].close(), ctx['mandrill'].close(), ctx['messagebird'].close()) email_retrying = [5, 10, 60, 600, 1800, 3600, 12 * 3600] @worker_function async def send_email(ctx, group_id: int, company_id: int, recipient: EmailRecipientModel, m: EmailSendModel): s = SendEmail(ctx, group_id, company_id, recipient, m) return await s.run() class SendEmail: __slots__ = 'ctx', 'settings', 'recipient', 'group_id', 'company_id', 'm', 'tags' def __init__(self, ctx: dict, group_id: int, company_id: int, recipient: EmailRecipientModel, m: EmailSendModel): self.ctx = ctx self.settings: Settings = ctx['settings'] self.group_id = group_id self.company_id = company_id self.recipient: EmailRecipientModel = recipient self.m: EmailSendModel = m self.tags 
= list(set(self.recipient.tags + self.m.tags + [str(self.m.uid)])) async def run(self): main_logger.info('Sending email to %s via %s', self.recipient.address, self.m.method) if self.ctx['job_try'] > len(email_retrying): main_logger.error('%s: tried to send email %d times, all failed', self.group_id, self.ctx['job_try']) await self._store_email_failed(MessageStatus.send_request_failed, 'upstream error') return context = dict(self.m.context, **self.recipient.context) if 'styles__sass' not in context and re.search(r'\{\{\{ *styles *\}\}\}', self.m.main_template): context['styles__sass'] = STYLES_SASS headers = dict(self.m.headers, **self.recipient.headers) email_info = await self._render_email(context, headers) if not email_info: return attachments = [a async for a in self._generate_base64_pdf(self.recipient.pdf_attachments)] attachments += [a async for a in self._generate_base64(self.recipient.attachments)] if self.m.method == EmailSendMethod.email_mandrill: if self.recipient.address.endswith('@example.com'): _id = re.sub(r'[^a-zA-Z0-9\-]', '', f'mandrill-{self.recipient.address}') await self._store_email(_id, utcnow(), email_info) else: await self._send_mandrill(email_info, attachments) elif self.m.method == EmailSendMethod.email_test: await self._send_test_email(email_info, attachments) else: raise NotImplementedError() async def _send_mandrill(self, email_info: EmailInfo, attachments: List[dict]): data = { 'async': True, 'message': dict( html=email_info.html_body, subject=email_info.subject, from_email=self.m.from_address.email, from_name=self.m.from_address.name, to=[dict(email=self.recipient.address, name=email_info.full_name, type='to')], headers=email_info.headers, track_opens=True, track_clicks=False, auto_text=True, view_content_link=False, signing_domain=self.m.from_address.email[self.m.from_address.email.index('@') + 1 :], subaccount=self.m.subaccount, tags=self.tags, inline_css=True, important=self.m.important, attachments=attachments, ), } send_ts = 
utcnow() job_try = self.ctx['job_try'] defer = email_retrying[job_try - 1] try: response = await self.ctx['mandrill'].post('messages/send.json', **data) except (ClientConnectionError, TimeoutError) as e: main_logger.info('client connection error group_id=%s job_try=%s defer=%ss', self.group_id, job_try, defer) raise Retry(defer=defer) from e except ApiError as e: if e.status in {502, 504} or (e.status == 500 and '<center>nginx/' in e.body): main_logger.info( 'temporary mandrill error group_id=%s status=%s job_try=%s defer=%ss', self.group_id, e.status, job_try, defer, ) raise Retry(defer=defer) from e else: # if the status is not 502 or 504, or 500 from nginx then raise raise data = await response.json() assert len(data) == 1, data data = data[0] assert data['email'] == self.recipient.address, data await self._store_email(data['_id'], send_ts, email_info) async def _send_test_email(self, email_info: EmailInfo, attachments: List[dict]): data = dict( from_email=self.m.from_address.email, from_name=self.m.from_address.name, group_uuid=str(self.m.uid), headers=email_info.headers, to_address=self.recipient.address, to_name=email_info.full_name, to_user_link=self.recipient.user_link, tags=self.tags, important=self.m.important, attachments=[ f'{a["name"]}:{base64.b64decode(a["content"]).decode(errors="ignore"):.40}' for a in attachments ], ) msg_id = re.sub(r'[^a-zA-Z0-9\-]', '', f'{self.m.uid}-{self.recipient.address}') send_ts = utcnow() output = ( f'to: {self.recipient.address}\n' f'msg id: {msg_id}\n' f'ts: {send_ts}\n' f'subject: {email_info.subject}\n' f'data: {json.dumps(data, indent=2)}\n' f'content:\n' f'{email_info.html_body}\n' ) if self.settings.test_output: # pragma: no branch Path.mkdir(self.settings.test_output, parents=True, exist_ok=True) save_path = self.settings.test_output / f'{msg_id}.txt' test_logger.info('sending message: %s (saved to %s)', output, save_path) save_path.write_text(output) await self._store_email(msg_id, send_ts, email_info) async def 
_render_email(self, context, headers) -> Optional[EmailInfo]: m = MessageDef( first_name=self.recipient.first_name, last_name=self.recipient.last_name, main_template=self.m.main_template, mustache_partials=self.m.mustache_partials, macros=self.m.macros, subject_template=self.m.subject_template, context=context, headers=headers, ) try: return render_email(m, self.ctx['email_click_url']) except ChevronError as e: await self._store_email_failed(MessageStatus.render_failed, f'Error rendering email: {e}') async def _generate_base64_pdf(self, pdf_attachments): headers = dict(pdf_page_size='A4', pdf_zoom='1.25', pdf_margin_left='8mm', pdf_margin_right='8mm') for a in pdf_attachments: async with self.ctx['session'].get(self.settings.pdf_generation_url, data=a.html, headers=headers) as r: if r.status == 200: pdf_content = await r.read() yield dict(type='application/pdf', name=a.name, content=base64.b64encode(pdf_content).decode()) else: data = await r.text() main_logger.warning('error generating pdf %s, data: %s', r.status, data) async def _generate_base64(self, attachments: List[AttachmentModel]): for attachment in attachments: try: # Check to see if content can be decoded from base64 base64.b64decode(attachment.content, validate=True) except binascii.Error: # Content has not yet been base64 encoded so needs to be encoded content = base64.b64encode(attachment.content).decode() else: # Content has already been base64 encoded so just pass content through content = attachment.content.decode() yield dict(name=attachment.name, type=attachment.mime_type, content=content) async def _store_email(self, external_id, send_ts, email_info: EmailInfo): data = dict( external_id=external_id, group_id=self.group_id, company_id=self.company_id, method=self.m.method, send_ts=send_ts, status=MessageStatus.send, to_first_name=self.recipient.first_name, to_last_name=self.recipient.last_name, to_user_link=self.recipient.user_link, to_address=self.recipient.address, tags=self.tags, 
subject=email_info.subject, body=email_info.html_body, ) attachments = [ f'{getattr(a, "id", None) or ""}::{a.name}' for a in chain(self.recipient.pdf_attachments, self.recipient.attachments) ] if attachments: data['attachments'] = attachments message_id = await self.ctx['pg'].fetchval_b( 'insert into messages (:values__names) values :values returning id', values=Values(**data) ) if email_info.shortened_link: await self.ctx['pg'].execute_b( 'insert into links (:values__names) values :values', values=MultipleValues( *[Values(message_id=message_id, token=token, url=url) for url, token in email_info.shortened_link] ), ) async def _store_email_failed(self, status: MessageStatus, error_msg): await self.ctx['pg'].execute_b( 'insert into messages (:values__names) values :values', values=Values( group_id=self.group_id, company_id=self.company_id, method=self.m.method, status=status, to_first_name=self.recipient.first_name, to_last_name=self.recipient.last_name, to_user_link=self.recipient.user_link, to_address=self.recipient.address, tags=self.tags, body=error_msg, ), ) @worker_function async def send_sms(ctx, group_id: int, company_id: int, recipient: SmsRecipientModel, m: SmsSendModel): s = SendSMS(ctx, group_id, company_id, recipient, m) return await s.run() class SendSMS: __slots__ = 'ctx', 'settings', 'recipient', 'group_id', 'company_id', 'm', 'tags', 'messagebird', 'from_name' def __init__(self, ctx: dict, group_id: int, company_id: int, recipient: SmsRecipientModel, m: SmsSendModel): self.ctx = ctx self.settings: Settings = ctx['settings'] self.group_id = group_id self.company_id = company_id self.recipient: SmsRecipientModel = recipient self.m: SmsSendModel = m self.tags = list(set(self.recipient.tags + self.m.tags + [str(self.m.uid)])) self.messagebird: MessageBird = ctx['messagebird'] self.from_name = self.m.from_name if self.m.country_code != 'US' else self.settings.us_send_number async def run(self): sms_data = await self._sms_prep() if not sms_data: return if 
self.m.method == SmsSendMethod.sms_test: await self._test_send_sms(sms_data) elif self.m.method == SmsSendMethod.sms_messagebird: await self._messagebird_send_sms(sms_data) else: raise NotImplementedError() async def _sms_prep(self) -> Optional[SmsData]: number_info = validate_number(self.recipient.number, self.m.country_code, include_description=False) msg, error, shortened_link, msg_length = None, None, None, None if not number_info or not number_info.is_mobile: error = f'invalid mobile number "{self.recipient.number}"' main_logger.warning( 'invalid mobile number "%s" for "%s", not sending', self.recipient.number, self.m.company_code ) else: context = dict(self.m.context, **self.recipient.context) shortened_link = apply_short_links(context, self.ctx['sms_click_url'], 12) try: msg = chevron.render(self.m.main_template, data=context) except ChevronError as e: error = f'Error rendering SMS: {e}' else: try: msg_length = sms_length(msg) except MessageTooLong as e: error = str(e) if error: await self.ctx['pg'].execute_b( 'insert into messages (:values__names) values :values', values=Values( group_id=self.group_id, company_id=self.company_id, method=self.m.method, status=MessageStatus.render_failed, to_first_name=self.recipient.first_name, to_last_name=self.recipient.last_name, to_user_link=self.recipient.user_link, to_address=number_info.number_formatted if number_info else self.recipient.number, tags=self.tags, body=error, ), ) else: return SmsData(number=number_info, message=msg, shortened_link=shortened_link, length=msg_length) async def _test_send_sms(self, sms_data: SmsData): # remove the + from the beginning of the number msg_id = f'{self.m.uid}-{sms_data.number.number[1:]}' send_ts = utcnow() cost = 0.012 * sms_data.length.parts output = ( f'to: {sms_data.number}\n' f'msg id: {msg_id}\n' f'ts: {send_ts}\n' f'group_id: {self.group_id}\n' f'tags: {self.tags}\n' f'company_code: {self.m.company_code}\n' f'from_name: {self.from_name}\n' f'cost: {cost}\n' f'length: 
{sms_data.length}\n' f'message:\n' f'{sms_data.message}\n' ) if self.settings.test_output: # pragma: no branch Path.mkdir(self.settings.test_output, parents=True, exist_ok=True) save_path = self.settings.test_output / f'{msg_id}.txt' test_logger.info('sending message: %s (saved to %s)', output, save_path) save_path.write_text(output) await self._store_sms(msg_id, send_ts, sms_data, cost) async def _messagebird_get_mcc_cost(self, redis, mcc): rates_key = 'messagebird-rates' if not await redis.exists(rates_key): # get fresh data on rates by mcc main_logger.info('getting fresh pricing data from messagebird...') r = await self.messagebird.get('pricing/sms/outbound') if r.status != 200: response = await r.text() main_logger.error('error getting messagebird api', extra={'status': r.status, 'response': response}) raise MessageBirdExternalError((r.status, response)) data = await r.json() prices = data['prices'] if not next((1 for g in prices if g['mcc'] == '0'), None): main_logger.error('no default messagebird pricing with mcc "0"', extra={'prices': prices}) prices = {g['mcc']: f'{float(g["price"]):0.5f}' for g in prices} await asyncio.gather(redis.hmset_dict(rates_key, prices), redis.expire(rates_key, ONE_DAY)) rate = await redis.hget(rates_key, mcc, encoding='utf8') if not rate: main_logger.warning('no rate found for mcc: "%s", using default', mcc) rate = await redis.hget(rates_key, '0', encoding='utf8') assert rate, f'no rate found for mcc: {mcc}' return float(rate) async def _messagebird_get_number_cost(self, number: Number): cc_mcc_key = f'messagebird-cc:{number.country_code}' with await self.ctx['redis'] as redis: mcc = await redis.get(cc_mcc_key) if mcc is None: main_logger.info('no mcc for %s, doing HLR lookup...', number.number) api_number = number.number.replace('+', '') await self.messagebird.post(f'lookup/{api_number}/hlr') network, hlr = None, None for i in range(30): r = await self.messagebird.get(f'lookup/{api_number}') data = await r.json() hlr = 
data.get('hlr') if not hlr: continue network = hlr.get('network') if not network: continue elif hlr['status'] == 'active': main_logger.info( 'found result for %s after %d attempts %s', number.number, i, json.dumps(data, indent=2) ) break await asyncio.sleep(1) if not hlr or not network: main_logger.warning('No HLR result found for %s after 30 attempts', number.number, extra=data) return mcc = str(network)[:3] await redis.setex(cc_mcc_key, ONE_YEAR, mcc) return await self._messagebird_get_mcc_cost(redis, mcc) async def _messagebird_send_sms(self, sms_data: SmsData): try: msg_cost = await self._messagebird_get_number_cost(sms_data.number) except MessageBirdExternalError: msg_cost = 0 # Set to SMS cost to 0 until cost API is working/changed if msg_cost is None: return cost = sms_data.length.parts * msg_cost send_ts = utcnow() main_logger.info( 'sending SMS to %s, parts: %d, cost: %0.2fp', sms_data.number.number, sms_data.length.parts, cost * 100 ) r = await self.messagebird.post( 'messages', originator=self.from_name, body=sms_data.message, recipients=[sms_data.number.number], datacoding='auto', reference='morpheus', # required to prompt status updates to occur allowed_statuses=201, ) data = await r.json() if data['recipients']['totalCount'] != 1: main_logger.error('not one recipients in send response', extra={'data': data}) await self._store_sms(data['id'], send_ts, sms_data, cost) async def _store_sms(self, external_id, send_ts, sms_data: SmsData, cost: float): async with self.ctx['pg'].acquire() as conn: message_id = await conn.fetchval_b( 'insert into messages (:values__names) values :values returning id', values=Values( external_id=external_id, group_id=self.group_id, company_id=self.company_id, method=self.m.method, send_ts=send_ts, status=MessageStatus.send, to_first_name=self.recipient.first_name, to_last_name=self.recipient.last_name, to_user_link=self.recipient.user_link, to_address=sms_data.number.number_formatted, tags=self.tags, body=sms_data.message, 
cost=cost, extra=json.dumps(asdict(sms_data.length)), ), ) if sms_data.shortened_link: await conn.execute_b( 'insert into links (:values__names) values :values', values=MultipleValues( *[Values(message_id=message_id, token=token, url=url) for url, token in sms_data.shortened_link] ), ) def validate_number(number, country, include_description=True) -> Optional[Number]: try: p = parse_number(number, country) except NumberParseException: return if not is_valid_number(p): return is_mobile = number_type(p) in MOBILE_NUMBER_TYPES descr = None if include_description: country = country_name_for_number(p, 'en') region = description_for_number(p, 'en') descr = country if country == region else f'{region}, {country}' return Number( number=format_number(p, PhoneNumberFormat.E164), country_code=f'{p.country_code}', number_formatted=format_number(p, PhoneNumberFormat.INTERNATIONAL), descr=descr, is_mobile=is_mobile, ) @worker_function async def update_mandrill_webhooks(ctx, events): mandrill_webhook = MandrillWebhook(events=events) statuses = {} for m in mandrill_webhook.events: status = await update_message_status(ctx, SendMethod.email_mandrill, m, log_each=False) if status in statuses: statuses[status] += 1 else: statuses[status] = 1 main_logger.info( 'updating %d messages: %s', len(mandrill_webhook.events), ' '.join(f'{k}={v}' for k, v in statuses.items()) ) return len(mandrill_webhook.events) @worker_function async def store_click(ctx, *, link_id, ip, ts, user_agent): cache_key = f'click-{link_id}-{ip}' with await ctx['redis'] as redis: v = await redis.incr(cache_key) if v > 1: return 'recently_clicked' await redis.expire(cache_key, 60) async with ctx['pg'].acquire() as conn: message_id, target = await conn.fetchrow('select message_id, url from links where id=$1', link_id) extra = {'target': target, 'ip': ip, 'user_agent': user_agent} if user_agent: ua_dict = ParseUserAgent(user_agent) platform = ua_dict['device']['family'] if platform in {'Other', None}: platform = 
ua_dict['os']['family'] extra['user_agent_display'] = ( ('{user_agent[family]} {user_agent[major]} on ' '{platform}') .format(platform=platform, **ua_dict) .strip(' ') ) ts = parse_datetime(ts) status = 'click' await conn.execute_b( 'insert into events (:values__names) values :values', values=Values(message_id=message_id, status=status, ts=ts, extra=json.dumps(extra)), ) @worker_function async def update_message_status(ctx, send_method: SendMethod, m: BaseWebhook, log_each=True) -> UpdateStatus: h = hashlib.md5(f'{m.message_id}-{to_unix_ms(m.ts)}-{m.status}-{m.extra_json(sort_keys=True)}'.encode()) ref = f'event-{h.hexdigest()}' with await ctx['redis'] as redis: v = await redis.incr(ref) if v > 1: log_each and main_logger.info( 'event already exists %s, ts: %s, ' 'status: %s. skipped', m.message_id, m.ts, m.status ) return UpdateStatus.duplicate await redis.expire(ref, 86400) async with ctx['pg'].acquire() as conn: message_id = await conn.fetchval( 'select id from messages where method = $1 and external_id = $2', send_method, m.message_id ) if not message_id: return UpdateStatus.missing if not m.ts.tzinfo: m.ts = m.ts.replace(tzinfo=timezone.utc) log_each and main_logger.info('adding event %s, ts: %s, status: %s', m.message_id, m.ts, m.status) await conn.execute_b( 'insert into events (:values__names) values :values', values=Values(message_id=message_id, status=m.status, ts=m.ts, extra=m.extra_json()), ) return UpdateStatus.added async def update_aggregation_view(ctx): await ctx['pg'].execute('refresh materialized view message_aggregation') def utcnow(): return datetime.utcnow().replace(tzinfo=timezone.utc) class WorkerSettings: max_jobs = 20 keep_result = 5 max_tries = len(email_retrying) + 1 # so we try all values in email_retrying functions = worker_functions on_startup = startup on_shutdown = shutdown cron_jobs = [cron(update_aggregation_view, minute=12, timeout=1800)] def run_worker(settings: Settings): # pragma: no cover arq_run_worker(WorkerSettings, 
redis_settings=settings.redis_settings, ctx={'settings': settings})
"""Support for non-delivered packages recorded in AfterShip.""" from __future__ import annotations import logging from typing import Any, Final from pyaftership.tracker import Tracking import voluptuous as vol from homeassistant.components.sensor import ( PLATFORM_SCHEMA as BASE_PLATFORM_SCHEMA, SensorEntity, ) from homeassistant.const import CONF_API_KEY, CONF_NAME, HTTP_OK from homeassistant.core import HomeAssistant from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.dispatcher import async_dispatcher_send from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.service import ServiceCall from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType from homeassistant.util import Throttle from .const import ( ADD_TRACKING_SERVICE_SCHEMA, ATTR_TRACKINGS, ATTRIBUTION, BASE, CONF_SLUG, CONF_TITLE, CONF_TRACKING_NUMBER, DEFAULT_NAME, DOMAIN, ICON, MIN_TIME_BETWEEN_UPDATES, REMOVE_TRACKING_SERVICE_SCHEMA, SERVICE_ADD_TRACKING, SERVICE_REMOVE_TRACKING, UPDATE_TOPIC, ) _LOGGER: Final = logging.getLogger(__name__) PLATFORM_SCHEMA: Final = BASE_PLATFORM_SCHEMA.extend( { vol.Required(CONF_API_KEY): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, } ) async def async_setup_platform( hass: HomeAssistant, config: ConfigType, async_add_entities: AddEntitiesCallback, discovery_info: DiscoveryInfoType | None = None, ) -> None: """Set up the AfterShip sensor platform.""" apikey = config[CONF_API_KEY] name = config[CONF_NAME] session = async_get_clientsession(hass) aftership = Tracking(hass.loop, session, apikey) await aftership.get_trackings() if not aftership.meta or aftership.meta["code"] != HTTP_OK: _LOGGER.error( "No tracking data found. 
Check API key is correct: %s", aftership.meta ) return instance = AfterShipSensor(aftership, name) async_add_entities([instance], True) async def handle_add_tracking(call: ServiceCall) -> None: """Call when a user adds a new Aftership tracking from Home Assistant.""" title = call.data.get(CONF_TITLE) slug = call.data.get(CONF_SLUG) tracking_number = call.data[CONF_TRACKING_NUMBER] await aftership.add_package_tracking(tracking_number, title, slug) async_dispatcher_send(hass, UPDATE_TOPIC) hass.services.async_register( DOMAIN, SERVICE_ADD_TRACKING, handle_add_tracking, schema=ADD_TRACKING_SERVICE_SCHEMA, ) async def handle_remove_tracking(call: ServiceCall) -> None: """Call when a user removes an Aftership tracking from Home Assistant.""" slug = call.data[CONF_SLUG] tracking_number = call.data[CONF_TRACKING_NUMBER] await aftership.remove_package_tracking(slug, tracking_number) async_dispatcher_send(hass, UPDATE_TOPIC) hass.services.async_register( DOMAIN, SERVICE_REMOVE_TRACKING, handle_remove_tracking, schema=REMOVE_TRACKING_SERVICE_SCHEMA, ) class AfterShipSensor(SensorEntity): """Representation of a AfterShip sensor.""" _attr_attribution = ATTRIBUTION _attr_native_unit_of_measurement: str = "packages" _attr_icon: str = ICON def __init__(self, aftership: Tracking, name: str) -> None: """Initialize the sensor.""" self._attributes: dict[str, Any] = {} self._state: int | None = None self.aftership = aftership self._attr_name = name @property def native_value(self) -> int | None: """Return the state of the sensor.""" return self._state @property def extra_state_attributes(self) -> dict[str, str]: """Return attributes for the sensor.""" return self._attributes async def async_added_to_hass(self) -> None: """Register callbacks.""" self.async_on_remove( self.hass.helpers.dispatcher.async_dispatcher_connect( UPDATE_TOPIC, self._force_update ) ) async def _force_update(self) -> None: """Force update of data.""" await self.async_update(no_throttle=True) 
self.async_write_ha_state() @Throttle(MIN_TIME_BETWEEN_UPDATES) async def async_update(self, **kwargs: Any) -> None: """Get the latest data from the AfterShip API.""" await self.aftership.get_trackings() if not self.aftership.meta: _LOGGER.error("Unknown errors when querying") return if self.aftership.meta["code"] != HTTP_OK: _LOGGER.error( "Errors when querying AfterShip. %s", str(self.aftership.meta) ) return status_to_ignore = {"delivered"} status_counts: dict[str, int] = {} trackings = [] not_delivered_count = 0 for track in self.aftership.trackings["trackings"]: status = track["tag"].lower() name = ( track["tracking_number"] if track["title"] is None else track["title"] ) last_checkpoint = ( f"Shipment {track["tag"].lower()}" if not track["checkpoints"] else track["checkpoints"][-1] ) status_counts[status] = status_counts.get(status, 0) + 1 trackings.append( { "name": name, "tracking_number": track["tracking_number"], "slug": track["slug"], "link": f"{BASE}{track["slug"]}/{track["tracking_number"]}", "last_update": track["updated_at"], "expected_delivery": track["expected_delivery"], "status": track["tag"], "last_checkpoint": last_checkpoint, } ) if status not in status_to_ignore: not_delivered_count += 1 else: _LOGGER.debug("Ignoring %s as it has status: %s", name, status) self._attributes = { **status_counts, ATTR_TRACKINGS: trackings, } self._state = not_delivered_count
"""Support for non-delivered packages recorded in AfterShip.""" from __future__ import annotations import logging from typing import Any, Final from pyaftership.tracker import Tracking import voluptuous as vol from homeassistant.components.sensor import ( PLATFORM_SCHEMA as BASE_PLATFORM_SCHEMA, SensorEntity, ) from homeassistant.const import CONF_API_KEY, CONF_NAME, HTTP_OK from homeassistant.core import HomeAssistant from homeassistant.helpers.aiohttp_client import async_get_clientsession import homeassistant.helpers.config_validation as cv from homeassistant.helpers.dispatcher import async_dispatcher_send from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.service import ServiceCall from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType from homeassistant.util import Throttle from .const import ( ADD_TRACKING_SERVICE_SCHEMA, ATTR_TRACKINGS, ATTRIBUTION, BASE, CONF_SLUG, CONF_TITLE, CONF_TRACKING_NUMBER, DEFAULT_NAME, DOMAIN, ICON, MIN_TIME_BETWEEN_UPDATES, REMOVE_TRACKING_SERVICE_SCHEMA, SERVICE_ADD_TRACKING, SERVICE_REMOVE_TRACKING, UPDATE_TOPIC, ) _LOGGER: Final = logging.getLogger(__name__) PLATFORM_SCHEMA: Final = BASE_PLATFORM_SCHEMA.extend( { vol.Required(CONF_API_KEY): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, } ) async def async_setup_platform( hass: HomeAssistant, config: ConfigType, async_add_entities: AddEntitiesCallback, discovery_info: DiscoveryInfoType | None = None, ) -> None: """Set up the AfterShip sensor platform.""" apikey = config[CONF_API_KEY] name = config[CONF_NAME] session = async_get_clientsession(hass) aftership = Tracking(hass.loop, session, apikey) await aftership.get_trackings() if not aftership.meta or aftership.meta["code"] != HTTP_OK: _LOGGER.error( "No tracking data found. 
Check API key is correct: %s", aftership.meta ) return instance = AfterShipSensor(aftership, name) async_add_entities([instance], True) async def handle_add_tracking(call: ServiceCall) -> None: """Call when a user adds a new Aftership tracking from Home Assistant.""" title = call.data.get(CONF_TITLE) slug = call.data.get(CONF_SLUG) tracking_number = call.data[CONF_TRACKING_NUMBER] await aftership.add_package_tracking(tracking_number, title, slug) async_dispatcher_send(hass, UPDATE_TOPIC) hass.services.async_register( DOMAIN, SERVICE_ADD_TRACKING, handle_add_tracking, schema=ADD_TRACKING_SERVICE_SCHEMA, ) async def handle_remove_tracking(call: ServiceCall) -> None: """Call when a user removes an Aftership tracking from Home Assistant.""" slug = call.data[CONF_SLUG] tracking_number = call.data[CONF_TRACKING_NUMBER] await aftership.remove_package_tracking(slug, tracking_number) async_dispatcher_send(hass, UPDATE_TOPIC) hass.services.async_register( DOMAIN, SERVICE_REMOVE_TRACKING, handle_remove_tracking, schema=REMOVE_TRACKING_SERVICE_SCHEMA, ) class AfterShipSensor(SensorEntity): """Representation of a AfterShip sensor.""" _attr_attribution = ATTRIBUTION _attr_native_unit_of_measurement: str = "packages" _attr_icon: str = ICON def __init__(self, aftership: Tracking, name: str) -> None: """Initialize the sensor.""" self._attributes: dict[str, Any] = {} self._state: int | None = None self.aftership = aftership self._attr_name = name @property def native_value(self) -> int | None: """Return the state of the sensor.""" return self._state @property def extra_state_attributes(self) -> dict[str, str]: """Return attributes for the sensor.""" return self._attributes async def async_added_to_hass(self) -> None: """Register callbacks.""" self.async_on_remove( self.hass.helpers.dispatcher.async_dispatcher_connect( UPDATE_TOPIC, self._force_update ) ) async def _force_update(self) -> None: """Force update of data.""" await self.async_update(no_throttle=True) 
self.async_write_ha_state() @Throttle(MIN_TIME_BETWEEN_UPDATES) async def async_update(self, **kwargs: Any) -> None: """Get the latest data from the AfterShip API.""" await self.aftership.get_trackings() if not self.aftership.meta: _LOGGER.error("Unknown errors when querying") return if self.aftership.meta["code"] != HTTP_OK: _LOGGER.error( "Errors when querying AfterShip. %s", str(self.aftership.meta) ) return status_to_ignore = {"delivered"} status_counts: dict[str, int] = {} trackings = [] not_delivered_count = 0 for track in self.aftership.trackings["trackings"]: status = track["tag"].lower() name = ( track["tracking_number"] if track["title"] is None else track["title"] ) last_checkpoint = ( f"Shipment {track['tag'].lower()}" if not track["checkpoints"] else track["checkpoints"][-1] ) status_counts[status] = status_counts.get(status, 0) + 1 trackings.append( { "name": name, "tracking_number": track["tracking_number"], "slug": track["slug"], "link": f"{BASE}{track['slug']}/{track['tracking_number']}", "last_update": track["updated_at"], "expected_delivery": track["expected_delivery"], "status": track["tag"], "last_checkpoint": last_checkpoint, } ) if status not in status_to_ignore: not_delivered_count += 1 else: _LOGGER.debug("Ignoring %s as it has status: %s", name, status) self._attributes = { **status_counts, ATTR_TRACKINGS: trackings, } self._state = not_delivered_count
"""" Modified by Harrison McCarty - Autonomous Robotics Club of Purdue Copyright © Krypton 2021 - https://github.com/kkrypt0nn Description: """ import json import os import platform import random import sys import discord from discord.ext import commands, tasks from discord.ext.commands import Bot if not os.path.isfile("config.json"): sys.exit("'config.json' not found! Please add it and try again.") else: with open("config.json") as file: config = json.load(file) """ Setup bot intents (events restrictions) For more information about intents, please go to the following websites: https://discordpy.readthedocs.io/en/latest/intents.html https://discordpy.readthedocs.io/en/latest/intents.html#privileged-intents Default Intents: intents.messages = True intents.reactions = True intents.guilds = True intents.emojis = True intents.bans = True intents.guild_typing = False intents.typing = False intents.dm_messages = False intents.dm_reactions = False intents.dm_typing = False intents.guild_messages = True intents.guild_reactions = True intents.integrations = True intents.invites = True intents.voice_states = False intents.webhooks = False Privileged Intents (Needs to be enabled on dev page), please use them only if you need them: intents.presences = True intents.members = True """ intents = discord.Intents.default() intents.members = True intents.reactions = True bot = Bot(command_prefix=config["bot_prefix"], intents=intents) # The code in this even is executed when the bot is ready @bot.event async def on_ready(): print(f"Logged in as {bot.user.name}") print(f"Discord.py API version: {discord.__version__}") print(f"Python version: {platform.python_version()}") print(f"Running on: {platform.system()} {platform.release()} ({os.name})") print("-------------------") await status_task() # Setup the game status task of the bot async def status_task(): await bot.change_presence(activity=discord.Game( "github.com/hmccarty/arc_assistant" )) # Removes the default help command of 
discord.py to be able to create our custom help command. bot.remove_command("help") if __name__ == "__main__": for file in os.listdir("./cogs"): if file.endswith(".py"): extension = file[:-3] try: bot.load_extension(f"cogs.{extension}") print(f"Loaded extension '{extension}'") except Exception as e: exception = f"{type(e).__name__}: {e}" print(f"Failed to load extension {extension}\n{exception}") # The code in this event is executed every time someone sends a message, with or without the prefix @bot.event async def on_message(message): # Ignores if a command is being executed by a bot or by the bot itself if message.author == bot.user or message.author.bot: return # Check if message is verification-related if message.guild == None: verification = bot.get_cog("verification") if verification is not None: await verification.handle_message(message) return await bot.process_commands(message) # The code in this event is executed every time a command has been *successfully* executed @bot.event async def on_command_completion(ctx): fullCommandName = ctx.command.qualified_name split = fullCommandName.split(" ") executedCommand = str(split[0]) print( f"Executed {executedCommand} command in {ctx.guild.name} (ID: {ctx.message.guild.id}) by {ctx.message.author} (ID: {ctx.message.author.id})") # The code in this event is executed every time a valid commands catches an error @bot.event async def on_command_error(context, error): if isinstance(error, commands.CommandOnCooldown): minutes, seconds = divmod(error.retry_after, 60) hours, minutes = divmod(minutes, 60) hours = hours % 24 embed = discord.Embed( title="Hey, please slow down!", description=f"You can use this command again in {f"{round(hours)} hours" if round(hours) > 0 else ""} {f"{round(minutes)} minutes" if round(minutes) > 0 else ""} {f"{round(seconds)} seconds" if round(seconds) > 0 else ""}.", color=0xE02B2B ) await context.send(embed=embed) elif isinstance(error, commands.MissingPermissions): embed = discord.Embed( 
title="Error!", description="You are missing the permission `" + ", ".join( error.missing_perms) + "` to execute this command!", color=0xE02B2B ) await context.send(embed=embed) elif isinstance(error, commands.MissingRequiredArgument) or \ isinstance(error, commands.MemberNotFound) or \ isinstance(error, commands.CommandNotFound): embed = discord.Embed( title="Error!", description=str(error).capitalize(), color=0xE02B2B ) await context.send(embed=embed) raise error # Run the bot with the token bot.run(config["bot_token"])
"""" Modified by Harrison McCarty - Autonomous Robotics Club of Purdue Copyright © Krypton 2021 - https://github.com/kkrypt0nn Description: """ import json import os import platform import random import sys import discord from discord.ext import commands, tasks from discord.ext.commands import Bot if not os.path.isfile("config.json"): sys.exit("'config.json' not found! Please add it and try again.") else: with open("config.json") as file: config = json.load(file) """ Setup bot intents (events restrictions) For more information about intents, please go to the following websites: https://discordpy.readthedocs.io/en/latest/intents.html https://discordpy.readthedocs.io/en/latest/intents.html#privileged-intents Default Intents: intents.messages = True intents.reactions = True intents.guilds = True intents.emojis = True intents.bans = True intents.guild_typing = False intents.typing = False intents.dm_messages = False intents.dm_reactions = False intents.dm_typing = False intents.guild_messages = True intents.guild_reactions = True intents.integrations = True intents.invites = True intents.voice_states = False intents.webhooks = False Privileged Intents (Needs to be enabled on dev page), please use them only if you need them: intents.presences = True intents.members = True """ intents = discord.Intents.default() intents.members = True intents.reactions = True bot = Bot(command_prefix=config["bot_prefix"], intents=intents) # The code in this even is executed when the bot is ready @bot.event async def on_ready(): print(f"Logged in as {bot.user.name}") print(f"Discord.py API version: {discord.__version__}") print(f"Python version: {platform.python_version()}") print(f"Running on: {platform.system()} {platform.release()} ({os.name})") print("-------------------") await status_task() # Setup the game status task of the bot async def status_task(): await bot.change_presence(activity=discord.Game( "github.com/hmccarty/arc_assistant" )) # Removes the default help command of 
discord.py to be able to create our custom help command. bot.remove_command("help") if __name__ == "__main__": for file in os.listdir("./cogs"): if file.endswith(".py"): extension = file[:-3] try: bot.load_extension(f"cogs.{extension}") print(f"Loaded extension '{extension}'") except Exception as e: exception = f"{type(e).__name__}: {e}" print(f"Failed to load extension {extension}\n{exception}") # The code in this event is executed every time someone sends a message, with or without the prefix @bot.event async def on_message(message): # Ignores if a command is being executed by a bot or by the bot itself if message.author == bot.user or message.author.bot: return # Check if message is verification-related if message.guild == None: verification = bot.get_cog("verification") if verification is not None: await verification.handle_message(message) return await bot.process_commands(message) # The code in this event is executed every time a command has been *successfully* executed @bot.event async def on_command_completion(ctx): fullCommandName = ctx.command.qualified_name split = fullCommandName.split(" ") executedCommand = str(split[0]) print( f"Executed {executedCommand} command in {ctx.guild.name} (ID: {ctx.message.guild.id}) by {ctx.message.author} (ID: {ctx.message.author.id})") # The code in this event is executed every time a valid commands catches an error @bot.event async def on_command_error(context, error): if isinstance(error, commands.CommandOnCooldown): minutes, seconds = divmod(error.retry_after, 60) hours, minutes = divmod(minutes, 60) hours = hours % 24 embed = discord.Embed( title="Hey, please slow down!", description=f"You can use this command again in {f'{round(hours)} hours' if round(hours) > 0 else ''} {f'{round(minutes)} minutes' if round(minutes) > 0 else ''} {f'{round(seconds)} seconds' if round(seconds) > 0 else ''}.", color=0xE02B2B ) await context.send(embed=embed) elif isinstance(error, commands.MissingPermissions): embed = discord.Embed( 
title="Error!", description="You are missing the permission `" + ", ".join( error.missing_perms) + "` to execute this command!", color=0xE02B2B ) await context.send(embed=embed) elif isinstance(error, commands.MissingRequiredArgument) or \ isinstance(error, commands.MemberNotFound) or \ isinstance(error, commands.CommandNotFound): embed = discord.Embed( title="Error!", description=str(error).capitalize(), color=0xE02B2B ) await context.send(embed=embed) raise error # Run the bot with the token bot.run(config["bot_token"])
# This is an automatically generated file. # DO NOT EDIT or your changes may be overwritten import base64 from typing import List, Optional from xdrlib import Packer, Unpacker from .claim_predicate_type import ClaimPredicateType from .int64 import Int64 from ..exceptions import ValueError __all__ = ["ClaimPredicate"] class ClaimPredicate: """ XDR Source Code ---------------------------------------------------------------- union ClaimPredicate switch (ClaimPredicateType type) { case CLAIM_PREDICATE_UNCONDITIONAL: void; case CLAIM_PREDICATE_AND: ClaimPredicate andPredicates<2>; case CLAIM_PREDICATE_OR: ClaimPredicate orPredicates<2>; case CLAIM_PREDICATE_NOT: ClaimPredicate* notPredicate; case CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME: int64 absBefore; // Predicate will be true if closeTime < absBefore case CLAIM_PREDICATE_BEFORE_RELATIVE_TIME: int64 relBefore; // Seconds since closeTime of the ledger in which the // ClaimableBalanceEntry was created }; ---------------------------------------------------------------- """ def __init__( self, type: ClaimPredicateType, and_predicates: List["ClaimPredicate"] = None, or_predicates: List["ClaimPredicate"] = None, not_predicate: Optional["ClaimPredicate"] = None, abs_before: Int64 = None, rel_before: Int64 = None, ) -> None: if and_predicates and len(and_predicates) > 2: raise ValueError( f"The maximum length of `and_predicates` should be 2, but got {len(and_predicates)}." ) if or_predicates and len(or_predicates) > 2: raise ValueError( f"The maximum length of `or_predicates` should be 2, but got {len(or_predicates)}." 
) self.type = type self.and_predicates = and_predicates self.or_predicates = or_predicates self.not_predicate = not_predicate self.abs_before = abs_before self.rel_before = rel_before def pack(self, packer: Packer) -> None: self.type.pack(packer) if self.type == ClaimPredicateType.CLAIM_PREDICATE_UNCONDITIONAL: return if self.type == ClaimPredicateType.CLAIM_PREDICATE_AND: if self.and_predicates is None: raise ValueError("and_predicates should not be None.") packer.pack_uint(len(self.and_predicates)) for and_predicate in self.and_predicates: and_predicate.pack(packer) return if self.type == ClaimPredicateType.CLAIM_PREDICATE_OR: if self.or_predicates is None: raise ValueError("or_predicates should not be None.") packer.pack_uint(len(self.or_predicates)) for or_predicate in self.or_predicates: or_predicate.pack(packer) return if self.type == ClaimPredicateType.CLAIM_PREDICATE_NOT: if self.not_predicate is None: packer.pack_uint(0) return packer.pack_uint(1) if self.not_predicate is None: raise ValueError("not_predicate should not be None.") self.not_predicate.pack(packer) return if self.type == ClaimPredicateType.CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME: if self.abs_before is None: raise ValueError("abs_before should not be None.") self.abs_before.pack(packer) return if self.type == ClaimPredicateType.CLAIM_PREDICATE_BEFORE_RELATIVE_TIME: if self.rel_before is None: raise ValueError("rel_before should not be None.") self.rel_before.pack(packer) return raise ValueError("Invalid type.") @classmethod def unpack(cls, unpacker: Unpacker) -> "ClaimPredicate": type = ClaimPredicateType.unpack(unpacker) if type == ClaimPredicateType.CLAIM_PREDICATE_UNCONDITIONAL: return cls(type) if type == ClaimPredicateType.CLAIM_PREDICATE_AND: length = unpacker.unpack_uint() and_predicates = [] for _ in range(length): and_predicates.append(ClaimPredicate.unpack(unpacker)) return cls(type, and_predicates=and_predicates) if type == ClaimPredicateType.CLAIM_PREDICATE_OR: length = 
unpacker.unpack_uint() or_predicates = [] for _ in range(length): or_predicates.append(ClaimPredicate.unpack(unpacker)) return cls(type, or_predicates=or_predicates) if type == ClaimPredicateType.CLAIM_PREDICATE_NOT: not_predicate = ClaimPredicate.unpack(unpacker) if not_predicate is None: raise ValueError("not_predicate should not be None.") return cls(type, not_predicate=not_predicate) if type == ClaimPredicateType.CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME: abs_before = Int64.unpack(unpacker) if abs_before is None: raise ValueError("abs_before should not be None.") return cls(type, abs_before=abs_before) if type == ClaimPredicateType.CLAIM_PREDICATE_BEFORE_RELATIVE_TIME: rel_before = Int64.unpack(unpacker) if rel_before is None: raise ValueError("rel_before should not be None.") return cls(type, rel_before=rel_before) raise ValueError("Invalid type.") def to_xdr_bytes(self) -> bytes: packer = Packer() self.pack(packer) return packer.get_buffer() @classmethod def from_xdr_bytes(cls, xdr: bytes) -> "ClaimPredicate": unpacker = Unpacker(xdr) return cls.unpack(unpacker) def to_xdr(self) -> str: xdr_bytes = self.to_xdr_bytes() return base64.b64encode(xdr_bytes).decode() @classmethod def from_xdr(cls, xdr: str) -> "ClaimPredicate": xdr_bytes = base64.b64decode(xdr.encode()) return cls.from_xdr_bytes(xdr_bytes) def __eq__(self, other: object): if not isinstance(other, self.__class__): return NotImplemented return ( self.type == other.type and self.and_predicates == other.and_predicates and self.or_predicates == other.or_predicates and self.not_predicate == other.not_predicate and self.abs_before == other.abs_before and self.rel_before == other.rel_before ) def __str__(self): out = [] out.append(f"type={self.type}") out.append( f"and_predicates={self.and_predicates}" ) if self.and_predicates is not None else None out.append( f"or_predicates={self.or_predicates}" ) if self.or_predicates is not None else None out.append( f"not_predicate={self.not_predicate}" ) if 
self.not_predicate is not None else None out.append( f"abs_before={self.abs_before}" ) if self.abs_before is not None else None out.append( f"rel_before={self.rel_before}" ) if self.rel_before is not None else None return f"<ClaimPredicate {[", ".join(out)]}>"
# This is an automatically generated file. # DO NOT EDIT or your changes may be overwritten import base64 from typing import List, Optional from xdrlib import Packer, Unpacker from .claim_predicate_type import ClaimPredicateType from .int64 import Int64 from ..exceptions import ValueError __all__ = ["ClaimPredicate"] class ClaimPredicate: """ XDR Source Code ---------------------------------------------------------------- union ClaimPredicate switch (ClaimPredicateType type) { case CLAIM_PREDICATE_UNCONDITIONAL: void; case CLAIM_PREDICATE_AND: ClaimPredicate andPredicates<2>; case CLAIM_PREDICATE_OR: ClaimPredicate orPredicates<2>; case CLAIM_PREDICATE_NOT: ClaimPredicate* notPredicate; case CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME: int64 absBefore; // Predicate will be true if closeTime < absBefore case CLAIM_PREDICATE_BEFORE_RELATIVE_TIME: int64 relBefore; // Seconds since closeTime of the ledger in which the // ClaimableBalanceEntry was created }; ---------------------------------------------------------------- """ def __init__( self, type: ClaimPredicateType, and_predicates: List["ClaimPredicate"] = None, or_predicates: List["ClaimPredicate"] = None, not_predicate: Optional["ClaimPredicate"] = None, abs_before: Int64 = None, rel_before: Int64 = None, ) -> None: if and_predicates and len(and_predicates) > 2: raise ValueError( f"The maximum length of `and_predicates` should be 2, but got {len(and_predicates)}." ) if or_predicates and len(or_predicates) > 2: raise ValueError( f"The maximum length of `or_predicates` should be 2, but got {len(or_predicates)}." 
) self.type = type self.and_predicates = and_predicates self.or_predicates = or_predicates self.not_predicate = not_predicate self.abs_before = abs_before self.rel_before = rel_before def pack(self, packer: Packer) -> None: self.type.pack(packer) if self.type == ClaimPredicateType.CLAIM_PREDICATE_UNCONDITIONAL: return if self.type == ClaimPredicateType.CLAIM_PREDICATE_AND: if self.and_predicates is None: raise ValueError("and_predicates should not be None.") packer.pack_uint(len(self.and_predicates)) for and_predicate in self.and_predicates: and_predicate.pack(packer) return if self.type == ClaimPredicateType.CLAIM_PREDICATE_OR: if self.or_predicates is None: raise ValueError("or_predicates should not be None.") packer.pack_uint(len(self.or_predicates)) for or_predicate in self.or_predicates: or_predicate.pack(packer) return if self.type == ClaimPredicateType.CLAIM_PREDICATE_NOT: if self.not_predicate is None: packer.pack_uint(0) return packer.pack_uint(1) if self.not_predicate is None: raise ValueError("not_predicate should not be None.") self.not_predicate.pack(packer) return if self.type == ClaimPredicateType.CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME: if self.abs_before is None: raise ValueError("abs_before should not be None.") self.abs_before.pack(packer) return if self.type == ClaimPredicateType.CLAIM_PREDICATE_BEFORE_RELATIVE_TIME: if self.rel_before is None: raise ValueError("rel_before should not be None.") self.rel_before.pack(packer) return raise ValueError("Invalid type.") @classmethod def unpack(cls, unpacker: Unpacker) -> "ClaimPredicate": type = ClaimPredicateType.unpack(unpacker) if type == ClaimPredicateType.CLAIM_PREDICATE_UNCONDITIONAL: return cls(type) if type == ClaimPredicateType.CLAIM_PREDICATE_AND: length = unpacker.unpack_uint() and_predicates = [] for _ in range(length): and_predicates.append(ClaimPredicate.unpack(unpacker)) return cls(type, and_predicates=and_predicates) if type == ClaimPredicateType.CLAIM_PREDICATE_OR: length = 
unpacker.unpack_uint() or_predicates = [] for _ in range(length): or_predicates.append(ClaimPredicate.unpack(unpacker)) return cls(type, or_predicates=or_predicates) if type == ClaimPredicateType.CLAIM_PREDICATE_NOT: not_predicate = ClaimPredicate.unpack(unpacker) if not_predicate is None: raise ValueError("not_predicate should not be None.") return cls(type, not_predicate=not_predicate) if type == ClaimPredicateType.CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME: abs_before = Int64.unpack(unpacker) if abs_before is None: raise ValueError("abs_before should not be None.") return cls(type, abs_before=abs_before) if type == ClaimPredicateType.CLAIM_PREDICATE_BEFORE_RELATIVE_TIME: rel_before = Int64.unpack(unpacker) if rel_before is None: raise ValueError("rel_before should not be None.") return cls(type, rel_before=rel_before) raise ValueError("Invalid type.") def to_xdr_bytes(self) -> bytes: packer = Packer() self.pack(packer) return packer.get_buffer() @classmethod def from_xdr_bytes(cls, xdr: bytes) -> "ClaimPredicate": unpacker = Unpacker(xdr) return cls.unpack(unpacker) def to_xdr(self) -> str: xdr_bytes = self.to_xdr_bytes() return base64.b64encode(xdr_bytes).decode() @classmethod def from_xdr(cls, xdr: str) -> "ClaimPredicate": xdr_bytes = base64.b64decode(xdr.encode()) return cls.from_xdr_bytes(xdr_bytes) def __eq__(self, other: object): if not isinstance(other, self.__class__): return NotImplemented return ( self.type == other.type and self.and_predicates == other.and_predicates and self.or_predicates == other.or_predicates and self.not_predicate == other.not_predicate and self.abs_before == other.abs_before and self.rel_before == other.rel_before ) def __str__(self): out = [] out.append(f"type={self.type}") out.append( f"and_predicates={self.and_predicates}" ) if self.and_predicates is not None else None out.append( f"or_predicates={self.or_predicates}" ) if self.or_predicates is not None else None out.append( f"not_predicate={self.not_predicate}" ) if 
self.not_predicate is not None else None out.append( f"abs_before={self.abs_before}" ) if self.abs_before is not None else None out.append( f"rel_before={self.rel_before}" ) if self.rel_before is not None else None return f"<ClaimPredicate {[', '.join(out)]}>"
from pathlib import Path import tarfile import requests from rich import print as rprint from bg_atlasapi import utils, config, core, descriptors COMPRESSED_FILENAME = "atlas.tar.gz" def _version_tuple_from_str(version_str): return tuple([int(n) for n in version_str.split(".")]) def _version_str_from_tuple(version_tuple): return f"{version_tuple[0]}.{version_tuple[1]}" class BrainGlobeAtlas(core.Atlas): """Add remote atlas fetching and version comparison functionalities to the core Atlas class. Parameters ---------- atlas_name : str Name of the atlas to be used. brainglobe_dir : str or Path object Default folder for brainglobe downloads. interm_download_dir : str or Path object Folder to download the compressed file for extraction. check_latest : bool (optional) If true, check if we have the most recent atlas (default=True). Set this to False to avoid waiting for remote server response on atlas instantiation and to suppress warnings. print_authors : bool (optional) If true, disable default listing of the atlas reference. """ atlas_name = None _remote_url_base = descriptors.remote_url_base def __init__( self, atlas_name, brainglobe_dir=None, interm_download_dir=None, check_latest=True, print_authors=True, ): self.atlas_name = atlas_name # Read BrainGlobe configuration file: conf = config.read_config() # Use either input locations or locations from the config file, # and create directory if it does not exist: for dir, dirname in zip( [brainglobe_dir, interm_download_dir], ["brainglobe_dir", "interm_download_dir"], ): if dir is None: dir = conf["default_dirs"][dirname] # If the default folder does not exist yet, make it: dir_path = Path(dir) dir_path.mkdir(exist_ok=True) setattr(self, dirname, dir_path) # Look for this atlas in local brainglobe folder: if self.local_full_name is None: if self.remote_version is None: raise ValueError(f"{atlas_name} is not a valid atlas name!") rprint( f"[magenta2]Bgatlas_api: {self.atlas_name} not found locally. 
Downloading...[magenta2]" ) self.download_extract_file() # Instantiate after eventual download: super().__init__(self.brainglobe_dir / self.local_full_name) if check_latest: self.check_latest_version() if print_authors: print(self) @property def local_version(self): """If atlas is local, return actual version of the downloaded files; Else, return none. """ full_name = self.local_full_name if full_name is None: return None return _version_tuple_from_str(full_name.split("_v")[-1]) @property def remote_version(self): """Remote version read from GIN conf file. If we are offline, return None. """ remote_url = self._remote_url_base.format("last_versions.conf") # Grasp remote version if a connection is available: try: versions_conf = utils.conf_from_url(remote_url) except requests.ConnectionError: return try: return _version_tuple_from_str( versions_conf["atlases"][self.atlas_name] ) except KeyError: return None @property def local_full_name(self): """As we can't know the local version a priori, search candidate dirs using name and not version number. If none is found, return None. 
""" pattern = f"{self.atlas_name}_v*" candidate_dirs = list(self.brainglobe_dir.glob(pattern)) # If multiple folders exist, raise error: if len(candidate_dirs) > 1: raise FileExistsError( f"Multiple versions of atlas {self.atlas_name} in {self.brainglobe_dir}" ) # If no one exist, return None: elif len(candidate_dirs) == 0: return # Else, return actual name: else: return candidate_dirs[0].name @property def remote_url(self): """Format complete url for download.""" if self.remote_version is not None: name = f"{self.atlas_name}_v{self.remote_version[0]}.{self.remote_version[1]}.tar.gz" return self._remote_url_base.format(name) def download_extract_file(self): """Download and extract atlas from remote url.""" utils.check_internet_connection() # Get path to folder where data will be saved destination_path = self.interm_download_dir / COMPRESSED_FILENAME # Try to download atlas data utils.retrieve_over_http(self.remote_url, destination_path) # Uncompress in brainglobe path: tar = tarfile.open(destination_path) tar.extractall(path=self.brainglobe_dir) tar.close() destination_path.unlink() def check_latest_version(self): """Checks if the local version is the latest available and prompts the user to update if not. """ if self.remote_version is None: # in this case, we are offline return local = _version_str_from_tuple(self.local_version) online = _version_str_from_tuple(self.remote_version) if local != online: rprint( f"[b][magenta2]Bg_atlasapi[/b]: [b]{self.atlas_name}[/b] version [b]{local}[/b] is not the latest available ([b]{online}[/b]). " + "To update the atlas run in the terminal:[/magenta2]\n" + f" [gold1]brainglobe update -a {self.atlas_name}[/gold1]" ) return False return True def __repr__(self): """Fancy print for the atlas providing authors information.""" meta = self.metadata name_split = self.atlas_name.split("_") pretty_name = "{} {} atlas (res. 
{})".format(*name_split) pretty_string = ( f"{pretty_name}\nFrom: {meta["atlas_link"]} ({meta["citation"]} )" ) return pretty_string
from pathlib import Path
import tarfile

import requests
from rich import print as rprint

from bg_atlasapi import utils, config, core, descriptors

COMPRESSED_FILENAME = "atlas.tar.gz"


def _version_tuple_from_str(version_str):
    """Convert a "major.minor" version string into a tuple of ints."""
    return tuple([int(n) for n in version_str.split(".")])


def _version_str_from_tuple(version_tuple):
    """Convert a (major, minor) version tuple into a "major.minor" string."""
    return f"{version_tuple[0]}.{version_tuple[1]}"


class BrainGlobeAtlas(core.Atlas):
    """Add remote atlas fetching and version comparison functionalities
    to the core Atlas class.

    Parameters
    ----------
    atlas_name : str
        Name of the atlas to be used.
    brainglobe_dir : str or Path object
        Default folder for brainglobe downloads.
    interm_download_dir : str or Path object
        Folder to download the compressed file for extraction.
    check_latest : bool (optional)
        If true, check if we have the most recent atlas (default=True). Set
        this to False to avoid waiting for remote server response on atlas
        instantiation and to suppress warnings.
    print_authors : bool (optional)
        If true, disable default listing of the atlas reference.
    """

    atlas_name = None
    _remote_url_base = descriptors.remote_url_base

    def __init__(
        self,
        atlas_name,
        brainglobe_dir=None,
        interm_download_dir=None,
        check_latest=True,
        print_authors=True,
    ):
        self.atlas_name = atlas_name

        # Read BrainGlobe configuration file:
        conf = config.read_config()

        # Use either input locations or locations from the config file,
        # and create directory if it does not exist.
        # (renamed loop variable; the original shadowed the builtin `dir`)
        for location, dirname in zip(
            [brainglobe_dir, interm_download_dir],
            ["brainglobe_dir", "interm_download_dir"],
        ):
            if location is None:
                location = conf["default_dirs"][dirname]

            # If the default folder does not exist yet, make it:
            dir_path = Path(location)
            dir_path.mkdir(exist_ok=True)
            setattr(self, dirname, dir_path)

        # Look for this atlas in local brainglobe folder:
        if self.local_full_name is None:
            # remote_version is None both offline and for unknown atlas names:
            if self.remote_version is None:
                raise ValueError(f"{atlas_name} is not a valid atlas name!")

            # FIX: close the rich style with [/magenta2]; the original repeated
            # the opening tag and left the markup unterminated.
            rprint(
                f"[magenta2]Bgatlas_api: {self.atlas_name} not found locally. Downloading...[/magenta2]"
            )
            self.download_extract_file()

        # Instantiate after eventual download:
        super().__init__(self.brainglobe_dir / self.local_full_name)

        if check_latest:
            self.check_latest_version()

        if print_authors:
            print(self)

    @property
    def local_version(self):
        """If atlas is local, return actual version of the downloaded files;
        Else, return none.
        """
        full_name = self.local_full_name
        if full_name is None:
            return None
        # The local folder is named "<atlas_name>_v<major>.<minor>":
        return _version_tuple_from_str(full_name.split("_v")[-1])

    @property
    def remote_version(self):
        """Remote version read from GIN conf file. If we are offline,
        return None.
        """
        remote_url = self._remote_url_base.format("last_versions.conf")

        # Grasp remote version if a connection is available:
        try:
            versions_conf = utils.conf_from_url(remote_url)
        except requests.ConnectionError:
            return

        # KeyError means the atlas name is not listed remotely:
        try:
            return _version_tuple_from_str(
                versions_conf["atlases"][self.atlas_name]
            )
        except KeyError:
            return None

    @property
    def local_full_name(self):
        """As we can't know the local version a priori, search candidate dirs
        using name and not version number. If none is found, return None.
        """
        pattern = f"{self.atlas_name}_v*"
        candidate_dirs = list(self.brainglobe_dir.glob(pattern))

        # If multiple folders exist, raise error:
        if len(candidate_dirs) > 1:
            raise FileExistsError(
                f"Multiple versions of atlas {self.atlas_name} in {self.brainglobe_dir}"
            )
        # If no one exist, return None:
        elif len(candidate_dirs) == 0:
            return
        # Else, return actual name:
        else:
            return candidate_dirs[0].name

    @property
    def remote_url(self):
        """Format complete url for download; None when offline."""
        if self.remote_version is not None:
            name = f"{self.atlas_name}_v{self.remote_version[0]}.{self.remote_version[1]}.tar.gz"
            return self._remote_url_base.format(name)

    def download_extract_file(self):
        """Download and extract atlas from remote url."""
        utils.check_internet_connection()

        # Get path to folder where data will be saved
        destination_path = self.interm_download_dir / COMPRESSED_FILENAME

        # Try to download atlas data
        utils.retrieve_over_http(self.remote_url, destination_path)

        # Uncompress in brainglobe path:
        tar = tarfile.open(destination_path)
        tar.extractall(path=self.brainglobe_dir)
        tar.close()

        # Remove the intermediate archive once extracted:
        destination_path.unlink()

    def check_latest_version(self):
        """Checks if the local version is the latest available
        and prompts the user to update if not.

        Returns None when offline, False when an update is available,
        True when up to date.
        """
        # remote_version is None when we are offline; nothing to compare then.
        if self.remote_version is None:
            return

        local = _version_str_from_tuple(self.local_version)
        online = _version_str_from_tuple(self.remote_version)

        if local != online:
            rprint(
                f"[b][magenta2]Bg_atlasapi[/b]: [b]{self.atlas_name}[/b] version [b]{local}[/b] "
                f"is not the latest available ([b]{online}[/b]). "
                "To update the atlas run in the terminal:[/magenta2]\n"
                f"    [gold1]brainglobe update -a {self.atlas_name}[/gold1]"
            )
            return False
        return True

    def __repr__(self):
        """Fancy print for the atlas providing authors information."""
        meta = self.metadata
        name_split = self.atlas_name.split("_")
        pretty_name = "{} {} atlas (res. {})".format(*name_split)
        pretty_string = (
            f"{pretty_name}\nFrom: {meta['atlas_link']} ({meta['citation']} )"
        )
        return pretty_string
""" A submodel that uses Dynamic Flux Balance Analysis (dFBA) to model a set of reactions :Author: Yin Hoon Chew <yinhoon.chew@mssm.edu> :Author: Arthur Goldberg <Arthur.Goldberg@mssm.edu> :Date: 2020-07-29 :Copyright: 2016-2020, Karr Lab :License: MIT """ import collections import conv_opt import copy import enum import itertools import math import scipy.constants import wc_lang from wc_sim import message_types from wc_sim.config import core as config_core_multialgorithm from wc_sim.multialgorithm_errors import DynamicMultialgorithmError, MultialgorithmError from wc_sim.submodels.dynamic_submodel import ContinuousTimeSubmodel class DfbaSubmodel(ContinuousTimeSubmodel): """ Use dynamic Flux Balance Analysis to predict the dynamics of chemical species in a container Attributes: DFBA_BOUND_SCALE_FACTOR (:obj:`float`): scaling factor for the bounds on reactions and constraints that avoid negative species populations; the default value is 1. DFBA_COEF_SCALE_FACTOR (:obj:`float`): scaling factor for the stoichiometric coefficients in dFBA objective reactions; the default value is 1. 
SOLVER (:obj:`str`): name of the selected solver in conv_opt, the default value is 'cplex' PRESOLVE (:obj:`str`): presolve mode in `conv_opt` ('auto', 'on', 'off'), the default value is 'on' SOLVER_OPTIONS (:obj:`dict`): parameters for the solver; default values are provided for 'cplex' OPTIMIZATION_TYPE (:obj:`str`): direction of optimization ('maximize', 'max', 'minimize', 'min'); the default value is 'maximize' FLUX_BOUNDS_VOLUMETRIC_COMPARTMENT_ID (:obj:`str`): id of the compartment to which the measured flux bounds are normalized, the default is the whole-cell VERBOSITY (:obj:`str`): output verbosity of the solver NEG_POP_CONSTRAINTS (:obj:`boolean`): whether the constraints that prevent negative species over the next time-step should be used; defaults to :obj:`True` dfba_solver_options (:obj:`dict` of :obj:`str`: :obj:`any`): options for solving dFBA submodel reaction_fluxes (:obj:`dict` of :obj:`str`: :obj:`float`): reaction fluxes data structure, which is pre-allocated dfba_obj_expr (:obj:`ParsedExpression`): an analyzed and validated dFBA objective expression exchange_rxns (:obj:`set` of :obj:`wc_lang.Reactions`): set of exchange and demand reactions _multi_reaction_constraints (:obj:`dict` of :obj:`str`: :obj:`conv_opt.Constraint`): a map from constraint id to constraints that avoid negative species populations in `self._conv_model.constraints` _conv_model (:obj:`conv_opt.Model`): linear programming model in `conv_opt` format _conv_variables (:obj:`dict` of :obj:`str`: :obj:`conv_opt.Variable`): a dictionary mapping reaction IDs to their associated `conv_opt.Variable` objects _conv_metabolite_matrices (:obj:`dict` of :obj:`str`: :obj:`list`): a dictionary mapping metabolite species IDs to lists of :obj:`conv_opt.LinearTerm` objects; each :obj:`conv_opt.LinearTerm` associates a reaction that the species participates in with the species' stoichiometry in the reaction _dfba_obj_reactions (:obj:`dict` of :obj:`str`: :obj:`wc_lang.DfbaObjReaction`): all 
:obj:`wc_lang.DfbaObjReaction`\ s used by the :obj:`self.dfba_obj_expr` _dfba_obj_species (:obj:`list` of :obj:`wc_lang.DfbaObjSpecies:`): all species in :obj:`DfbaObjReaction`\ s used by `dfba_obj_expr`, keyed by their IDs _reaction_bounds (:obj:`dict` of :obj:`str`: :obj:`tuple`): a dictionary that maps reaction IDs to (minimum bound, maximum bound) tuples _optimal_obj_func_value (:obj:`float`): the value of objective function returned by the solver """ # default options DFBA_BOUND_SCALE_FACTOR = 1. DFBA_COEF_SCALE_FACTOR = 1. SOLVER = 'cplex' PRESOLVE = 'on' SOLVER_OPTIONS = { 'cplex': { 'parameters': { 'emphasis': { 'numerical': 1, }, 'read': { 'scale': 1, }, }, }, } OPTIMIZATION_TYPE = 'maximize' VERBOSITY = 'off' FLUX_BOUNDS_VOLUMETRIC_COMPARTMENT_ID = 'wc' NEG_POP_CONSTRAINTS = True # register the message types sent by DfbaSubmodel messages_sent = [message_types.RunFba] # register 'handle_RunFba_msg' to handle RunFba events event_handlers = [(message_types.RunFba, 'handle_RunFba_msg')] time_step_message = message_types.RunFba def __init__(self, id, dynamic_model, reactions, species, dynamic_compartments, local_species_population, dfba_time_step, options=None): """ Initialize a dFBA submodel instance Args: id (:obj:`str`): unique id of this dFBA submodel dynamic_model (:obj: `DynamicModel`): the simulation's central coordinator reactions (:obj:`list` of `wc_lang.Reaction`): the reactions modeled by this dFBA submodel species (:obj:`list` of `wc_lang.Species`): the species that participate in the reactions modeled by this dFBA submodel dynamic_compartments (:obj: `dict`): `DynamicCompartment`s, keyed by id, that contain species which participate in reactions that this dFBA submodel models, including adjacent compartments used by its transfer reactions local_species_population (:obj:`LocalSpeciesPopulation`): the store that maintains this dFBA submodel's species population dfba_time_step (:obj:`float`): time interval between FBA optimization options 
(:obj:`dict`, optional): dFBA submodel options Raises: :obj:`MultiAlgorithmError`: if the `dynamic_dfba_objective` cannot be found, or if some reactions are reversible, or if the provided 'dfba_bound_scale_factor' in options does not have a positive value, or if the provided 'dfba_coef_scale_factor' in options does not have a positive value, or if the provided 'solver' in options is not a valid value, or if the provided 'presolve' in options is not a valid value, or if the 'solver' value provided in the 'solver_options' in options is not the same as the name of the selected `conv_opt.Solver`, or if the provided 'flux_bounds_volumetric_compartment_id' is not a valid compartment ID in the model """ super().__init__(id, dynamic_model, reactions, species, dynamic_compartments, local_species_population, dfba_time_step, options) self.dfba_solver_options = { 'dfba_bound_scale_factor': self.DFBA_BOUND_SCALE_FACTOR, 'dfba_coef_scale_factor': self.DFBA_COEF_SCALE_FACTOR, 'solver': self.SOLVER, 'presolve': self.PRESOLVE, 'solver_options': self.SOLVER_OPTIONS, 'optimization_type': self.OPTIMIZATION_TYPE, 'flux_bounds_volumetric_compartment_id': self.FLUX_BOUNDS_VOLUMETRIC_COMPARTMENT_ID, 'verbosity': self.VERBOSITY, 'negative_pop_constraints': self.NEG_POP_CONSTRAINTS } if options is not None: if 'dfba_bound_scale_factor' in options: if options['dfba_bound_scale_factor'] <= 0.: raise MultialgorithmError(f"DfbaSubmodel {self.id}: dfba_bound_scale_factor must" f" be larger than zero but is {options["dfba_bound_scale_factor"]}") self.dfba_solver_options['dfba_bound_scale_factor'] = options['dfba_bound_scale_factor'] if 'dfba_coef_scale_factor' in options: if options['dfba_coef_scale_factor'] <= 0.: raise MultialgorithmError(f"DfbaSubmodel {self.id}: dfba_coef_scale_factor must" f" be larger than zero but is {options["dfba_coef_scale_factor"]}") self.dfba_solver_options['dfba_coef_scale_factor'] = options['dfba_coef_scale_factor'] if 'solver' in options: if options['solver'] not 
in conv_opt.Solver.__members__: raise MultialgorithmError(f"DfbaSubmodel {self.id}: {options["solver"]}" f" is not a valid Solver") self.dfba_solver_options['solver'] = options['solver'] if 'presolve' in options: if options['presolve'] not in conv_opt.Presolve.__members__: raise MultialgorithmError(f"DfbaSubmodel {self.id}: {options["presolve"]}" f" is not a valid Presolve option") self.dfba_solver_options['presolve'] = options['presolve'] if 'solver_options' in options: if self.dfba_solver_options['solver'] not in options['solver_options']: raise MultialgorithmError(f"DfbaSubmodel {self.id}: the solver key in" f" solver_options is not the same as the selected solver" f" '{self.dfba_solver_options["solver"]}'") self.dfba_solver_options['solver_options'] = options['solver_options'] if 'optimization_type' in options: if options['optimization_type'] not in conv_opt.ObjectiveDirection.__members__: raise MultialgorithmError(f"DfbaSubmodel {self.id}: the optimization_type in" f" options can only take 'maximize', 'max', 'minimize' or 'min' as value but is" f" '{options["optimization_type"]}'") self.dfba_solver_options['optimization_type'] = options['optimization_type'] if 'flux_bounds_volumetric_compartment_id' in options: comp_id = options['flux_bounds_volumetric_compartment_id'] if comp_id != self.FLUX_BOUNDS_VOLUMETRIC_COMPARTMENT_ID: try: flux_bound_comp = self.dynamic_model.dynamic_compartments[comp_id] except: raise MultialgorithmError(f"DfbaSubmodel {self.id}: the user-provided" f" flux_bounds_volumetric_compartment_id '{comp_id}' is not the ID" f" of a compartment in the model") self.dfba_solver_options['flux_bounds_volumetric_compartment_id'] = comp_id if 'verbosity' in options: if options['verbosity'] not in conv_opt.Verbosity.__members__: raise MultialgorithmError(f"DfbaSubmodel {self.id}: the verbosity in" f" options must be one of {set(conv_opt.Verbosity.__members__.keys())} but is" f" '{options["verbosity"]}'") self.dfba_solver_options['verbosity'] = 
options['verbosity'] if 'negative_pop_constraints' in options: self.dfba_solver_options['negative_pop_constraints'] = options['negative_pop_constraints'] # ensure that all reactions are irreversible errors = [] for rxn in self.reactions: if rxn.reversible: errors.append(rxn.id) if errors: rxn_ids = ', '.join(errors) raise MultialgorithmError(f"DfbaSubmodel {self.id}: reactions are reversible: {rxn_ids}") # determine set of exchange reactions, each of which has only one participant # TODO (APG): should we use the exchange reaction pattern from the config instead? self.exchange_rxns = set() for rxn in self.reactions: if len(rxn.participants) == 1: self.exchange_rxns.add(rxn) # get the dfba objective's expression dfba_objective_id = f'dfba-obj-{id}' if dfba_objective_id not in dynamic_model.dynamic_dfba_objectives: # pragma: no cover raise MultialgorithmError(f"DfbaSubmodel '{self.id}': cannot find dynamic_dfba_objective " f"{dfba_objective_id}") self.dfba_obj_expr = dynamic_model.dynamic_dfba_objectives[dfba_objective_id].wc_lang_expression # collect all wc_lang.DfbaObjReactions used by dfba_obj_expr self._dfba_obj_reactions = {} for rxn_cls in self.dfba_obj_expr.related_objects: if issubclass(rxn_cls, wc_lang.DfbaObjReaction): for rxn in self.dfba_obj_expr.related_objects[rxn_cls].values(): self._dfba_obj_reactions[rxn.id] = rxn # ensure that the dfba objective doesn't contain exchange rxns errors = [] for rxn_cls in self.dfba_obj_expr.related_objects: for rxn in self.dfba_obj_expr.related_objects[rxn_cls].values(): if rxn in self.exchange_rxns: errors.append(rxn.id) if errors: rxns = ', '.join(errors) raise MultialgorithmError(f"the dfba objective '{dfba_objective_id}' " f"uses exchange reactions: {rxns}") # TODO (APG): warn if species in dFBA objective reactions aren't used by metabolic reactions # log initialization data self.log_with_time("init: id: {}".format(id)) self.log_with_time("init: time_step: {}".format(str(dfba_time_step))) self.set_up_dfba_submodel() 
self.set_up_optimizations() self._model_dumps = 0 def set_up_dfba_submodel(self): """ Set up a dFBA submodel, by converting to a linear programming matrix Raises: :obj:`MultiAlgorithmError`: if the ids in :obj:`DfbaObjReaction`\ s and :obj:`Reactions`\ s intersect """ self.set_up_continuous_time_submodel() ### dFBA specific code ### # raise an error if the ids in DfbaObjReactions and Reactions intersect reaction_ids = set([rxn.id for rxn in self.reactions]) dfba_obj_reaction_ids = set(self._dfba_obj_reactions) if reaction_ids & dfba_obj_reaction_ids: raise MultialgorithmError(f"in model {self.dynamic_model.id} the ids in DfbaObjReactions " f"and Reactions intersect: {reaction_ids & dfba_obj_reaction_ids}") # TODO (APG): later: support colliding ids by creating unique ids prefixed by class # Formulate the optimization problem using the conv_opt package self._conv_model = conv_opt.Model(name='model') self._conv_variables = {} self._conv_metabolite_matrices = collections.defaultdict(list) for rxn in self.reactions: self._conv_variables[rxn.id] = conv_opt.Variable( name=rxn.id, type=conv_opt.VariableType.continuous) self._conv_model.variables.append(self._conv_variables[rxn.id]) for part in rxn.participants: self._conv_metabolite_matrices[part.species.id].append( conv_opt.LinearTerm(self._conv_variables[rxn.id], part.coefficient)) self._dfba_obj_species = [] for rxn in self._dfba_obj_reactions.values(): self._conv_variables[rxn.id] = conv_opt.Variable( name=rxn.id, type=conv_opt.VariableType.continuous, lower_bound=0.) 
self._conv_model.variables.append(self._conv_variables[rxn.id]) for part in rxn.dfba_obj_species: self._dfba_obj_species.append(part) self._conv_metabolite_matrices[part.species.id].append( conv_opt.LinearTerm(self._conv_variables[rxn.id], part.value)) # Set up the objective function errors = [] dfba_obj_expr_objs = self.dfba_obj_expr.lin_coeffs for rxn_cls in dfba_obj_expr_objs.values(): for rxn, lin_coef in rxn_cls.items(): if math.isnan(lin_coef): errors.append(rxn.id) self._conv_model.objective_terms.append(conv_opt.LinearTerm( self._conv_variables[rxn.id], lin_coef)) if errors: rxn_ids = ', '.join(errors) raise MultialgorithmError(f"objective function not linear: reaction(s) have NaN coefficient(s) " f"in its expression: {rxn_ids}") self._conv_model.objective_direction = \ conv_opt.ObjectiveDirection[self.dfba_solver_options['optimization_type']] self._multi_reaction_constraints = self.initialize_neg_species_pop_constraints() def get_conv_model(self): """ Get the `conv_opt` model Returns: :obj:`conv_opt.Model`: the linear programming model in `conv_opt` format """ return self._conv_model @staticmethod def _get_species_and_stoichiometry(reaction): """ Get a reaction's species and their net stoichiometries Handles both :obj:`wc_lang.Reaction` and :obj:`wc_lang.DfbaObjReaction` reactions. Args: reaction (:obj:`wc_lang.Reaction` or :obj:`wc_lang.DfbaObjReaction`): a reaction Returns: :obj:`dict`: map from species to net stoichiometry for each species in `reaction`, with entries which have net stoichiometry of 0. 
removed """ # get net coefficients, since a species can participate in both sides of a reaction species_net_coefficients = collections.defaultdict(float) if isinstance(reaction, wc_lang.Reaction): for part in reaction.participants: species_net_coefficients[part.species] += part.coefficient # pragma: no cover false branch in this elif; ignore missing branch coverage report elif isinstance(reaction, wc_lang.DfbaObjReaction): for part in reaction.dfba_obj_species: species_net_coefficients[part.species] += part.value species_to_rm = [s for s in species_net_coefficients if species_net_coefficients[s] == 0.] for species in species_to_rm: del species_net_coefficients[species] return species_net_coefficients NEG_POP_CONSTRAINT_PREFIX = 'neg_pop_constr' NEG_POP_CONSTRAINT_SEP = '__' LB = '__LB__' RB = '__RB__' @staticmethod def species_id_without_brkts(species_id): """ Replace brackets in a species id with codes Args: species_id (:obj:`str`): WC Lang species id Returns: :obj:`str`: species id with brackets replaced by codes Raises: :obj:`MultiAlgorithmError`: if `species_id` isn't a properly formatted :obj:`wc_lang.Species` id, or has bracket codes """ try: wc_lang.Species.parse_id(species_id) except ValueError as e: raise MultialgorithmError(e) if DfbaSubmodel.LB in species_id or DfbaSubmodel.RB in species_id: raise MultialgorithmError(f"species_id '{species_id}' already has bracket code(s)") return species_id.replace('[', DfbaSubmodel.LB).replace(']', DfbaSubmodel.RB) @staticmethod def species_id_with_brkts(species_id): """ Replace codes in a species id with brackets Args: species_id (:obj:`str`): WC Lang species id with brackets replaced by codes Returns: :obj:`str`: standard WC Lang species id Raises: :obj:`MultiAlgorithmError`: if `species_id` doesn't have bracket codes or has brackets """ if (DfbaSubmodel.LB not in species_id or DfbaSubmodel.RB not in species_id or '[' in species_id or ']' in species_id): raise MultialgorithmError(f"invalid species_id with bracket 
codes '{species_id}' it should be " f"species_type_id{DfbaSubmodel.LB}compartment_id{DfbaSubmodel.RB}") return species_id.replace(DfbaSubmodel.LB, '[').replace(DfbaSubmodel.RB, ']') @staticmethod def gen_neg_species_pop_constraint_id(species_id): """ Generate a negative species population constraint id Args: species_id (:obj:`str`): id of species being constrained Returns: :obj:`str`: a negative species population constraint id """ return DfbaSubmodel.NEG_POP_CONSTRAINT_PREFIX + DfbaSubmodel.NEG_POP_CONSTRAINT_SEP + species_id @staticmethod def parse_neg_species_pop_constraint_id(neg_species_pop_constraint_id): """ Parse a negative species population constraint id Args: neg_species_pop_constraint_id (:obj:`str`): a negative species population constraint id Returns: :obj:`str`: id of species being constrained """ loc = len(DfbaSubmodel.NEG_POP_CONSTRAINT_PREFIX) + len(DfbaSubmodel.NEG_POP_CONSTRAINT_SEP) return neg_species_pop_constraint_id[loc:] def initialize_neg_species_pop_constraints(self): """ Make constraints that prevent species populations from going negative A separate constraint is made for each species. These constraints prevent the species population from declining so quickly that it becomes negative in the next time step. Call this when a dFBA submodel is initialized. Do nothing if negative species population constraints are not being used. 
Returns: :obj:`dict` of :obj:`str`: :obj:`conv_opt.Constraint`: a map from constraint id to constraints stored in `self._conv_model.constraints` """ if not self.dfba_solver_options['negative_pop_constraints']: return {} # make map from species to pseudo-reactions that use the species reactions_using_species = collections.defaultdict(set) # add dFBA objective reactions and their species for rxn in self._dfba_obj_reactions.values(): for species in self._get_species_and_stoichiometry(rxn): reactions_using_species[species].add(rxn) # add exchange reactions and their species for rxn in self.exchange_rxns: for species in self._get_species_and_stoichiometry(rxn): reactions_using_species[species].add(rxn) multi_reaction_constraints = {} for species, rxns in reactions_using_species.items(): # create an expression for species' rate of change, in molecules / sec # ds/dt for species s is sum(coef * flux) for all rxns that use s constr_expr = [] for rxn in rxns: for rxn_species, net_coef in self._get_species_and_stoichiometry(rxn).items(): if rxn_species == species: constr_expr.append(conv_opt.LinearTerm(self._conv_variables[rxn.id], net_coef)) # optimization: only create a Constraint when the species can be consumed, # which can only occur when some of its net coefficients are negative if any([linear_term.coefficient < 0 for linear_term in constr_expr]): # before solving FBA bound_neg_species_pop_constraints() will set lower_bound of constraint # to the rate at which the amount of species goes to 0 in the next time step # constraint keeps the amount of species >= 0 over the time step constraint_id = DfbaSubmodel.gen_neg_species_pop_constraint_id(species.id) constraint = conv_opt.Constraint(constr_expr, name=constraint_id, lower_bound=None, upper_bound=None) self._conv_model.constraints.append(constraint) multi_reaction_constraints[constraint_id] = constraint return multi_reaction_constraints def set_up_optimizations(self): """ To improve performance, pre-compute and 
pre-allocate some data structures """ self.set_up_continuous_time_optimizations() # pre-allocate dict of reaction fluxes self.reaction_fluxes = {rxn.id: float('NaN') for rxn in self.reactions} # initialize adjustments, the dict that will hold the species population change rates self.adjustments = {} for obj_species in self._dfba_obj_species: self.adjustments[obj_species.species.id] = 0. for exchange_rxn in self.exchange_rxns: self.adjustments[exchange_rxn.participants[0].species.id] = 0. for met_id, expression in self._conv_metabolite_matrices.items(): self._conv_model.constraints.append(conv_opt.Constraint(expression, name=met_id, upper_bound=0.0, lower_bound=0.0)) def get_reaction_fluxes(self): """ Get the reaction fluxes Returns: :obj:`dict` of :obj:`str`: :obj:`float`: reaction fluxes """ return self.reaction_fluxes def determine_bounds(self): """ Determine the minimum and maximum flux bounds for each reaction Bounds provided by rate laws or flux bound constants in the model are written to `self._reaction_bounds`. """ flux_comp_id = self.dfba_solver_options['flux_bounds_volumetric_compartment_id'] if flux_comp_id == self.FLUX_BOUNDS_VOLUMETRIC_COMPARTMENT_ID: flux_comp_volume = self.dynamic_model.cell_volume() else: flux_comp_volume = self.dynamic_model.dynamic_compartments[flux_comp_id].volume() self._reaction_bounds = {} for reaction in self.reactions: # defaults # the default minimum constraint of an irreversible reaction is 0 min_constr = 0. 
# None indicates no maximum constraint max_constr = None # if a rate law is available, use it to compute a max bound if reaction.rate_laws: max_constr = self.calc_reaction_rate(reaction, use_enabled=False) # otherwise use the fixed bounds elif reaction.flux_bounds: rxn_bounds = reaction.flux_bounds if isinstance(rxn_bounds.min, (int, float)) and 0 <= rxn_bounds.min: min_constr = rxn_bounds.min * flux_comp_volume * scipy.constants.Avogadro if isinstance(rxn_bounds.max, (int, float)) and not math.isnan(rxn_bounds.max): max_constr = rxn_bounds.max * flux_comp_volume * scipy.constants.Avogadro self._reaction_bounds[reaction.id] = (min_constr, max_constr) def bound_neg_species_pop_constraints(self): """ Update bounds in the negative species population constraints that span multiple reactions Update the bounds in each constraint in `self._multi_reaction_constraints` that prevents a species from having a negative species population in the next time step. Call this before each run of the FBA solver. """ # set bounds in multi-reaction constraints for constraint_id, constraint in self._multi_reaction_constraints.items(): species_id = DfbaSubmodel.parse_neg_species_pop_constraint_id(constraint_id) species_pop = self.local_species_population.read_one(self.time, species_id) max_allowed_consumption_of_species = species_pop / self.time_step constraint.lower_bound = -max_allowed_consumption_of_species def update_bounds(self): """ Update the minimum and maximum bounds of `conv_opt.Variable` based on the values in `self._reaction_bounds` """ for rxn_id, (min_constr, max_constr) in self._reaction_bounds.items(): self._conv_variables[rxn_id].lower_bound = min_constr self._conv_variables[rxn_id].upper_bound = max_constr self.bound_neg_species_pop_constraints() def del_infeasible_rxns(self, conv_opt_model, copy_model=True): """ Delete infeasible reactions from a convex optimization model Delete reactions with lower bound == upper bound == 0. 
Args: conv_opt_model (:obj:`conv_opt.Model`): a convex optimization model copy_model (:obj:`boolean`, optional): whether to copy the convex optimization model before modifying it; defaults to :obj:`True` Returns: :obj:`conv_opt.Model`: the convex optimization model with infeasible reactions removed """ infeasible_rxns = set() # infeasible rxns are the variables with lower bound == upper bound == 0. # remove infeasible rxns from the variables # remove infeasible rxns from constraints that use them # remove constraints that have no terms remaining if copy_model: conv_opt_model = copy.deepcopy(conv_opt_model) def compute_population_change_rates(self): """ Compute the rate of change of the populations of species used by this dFBA Because FBA obtains a steady-state solution for reaction fluxes, only species that participate in the exchange reactions or dFBA objective pseudo-reactions at the edge of the FBA network can have non-zero rates of change. Updates the existing dict `self.adjustments`. """ # Calculate the adjustment for each species in a pseudo-reaction # as the sum over reactions of stoichiometry * reaction flux for species_id in self.adjustments: self.adjustments[species_id] = 0 # Compute for exchange species for exchange_rxn in self.exchange_rxns: self.adjustments[exchange_rxn.participants[0].species.id] -= \ exchange_rxn.participants[0].coefficient * self.reaction_fluxes[exchange_rxn.id] # Compute for dFBA objective species for obj_species in self._dfba_obj_species: self.adjustments[obj_species.species.id] -= \ obj_species.value * self.reaction_fluxes[obj_species.dfba_obj_reaction.id] def scale_conv_opt_model(self, conv_opt_model, copy_model=True, dfba_bound_scale_factor=None, dfba_coef_scale_factor=None): """ Apply scaling factors to a `conv_opt` model Scaling factors can be used to scale the size of bounds and objective reaction stoichiometric coefficients to address numerical problems with the linear programming solver. 
They are elements of `dfba_solver_options`. The `dfba_bound_scale_factor` option scales the bounds on reactions and constraints that avoid negative species populations. The `dfba_coef_scale_factor` scales the stoichiometric coefficients in dFBA objective reactions. Scaling is done by the this method. Symmetrically, the solution results are returned to the scale of the whole-cell model by inverting the consequences of these scaling factors. This is done by the `unscale_conv_opt_solution` method. Args: conv_opt_model (:obj:`conv_opt.Model`): a convex optimization model copy_model (:obj:`boolean`, optional): whether to copy the convex optimization model before scaling it; defaults to :obj:`True` dfba_bound_scale_factor (:obj:`float`, optional): factor used to scale the bounds on reactions and constraints that avoid negative species populations; if not supplied, is taken from `self.dfba_solver_options` dfba_coef_scale_factor (:obj:`float`, optional): factor used to scale the stoichiometric coefficients in dFBA objective reactions; if not supplied, is taken from `self.dfba_solver_options` Returns: :obj:`conv_opt.Model`: the scaled convex optimization model """ if copy_model: conv_opt_model = copy.deepcopy(conv_opt_model) if dfba_bound_scale_factor is None: dfba_bound_scale_factor = self.dfba_solver_options['dfba_bound_scale_factor'] if dfba_coef_scale_factor is None: dfba_coef_scale_factor = self.dfba_solver_options['dfba_coef_scale_factor'] # scale bounds # skip non-numeric bounds, such as None for variable in conv_opt_model.variables: if isinstance(variable.lower_bound, (int, float)): variable.lower_bound *= dfba_bound_scale_factor if isinstance(variable.upper_bound, (int, float)): variable.upper_bound *= dfba_bound_scale_factor # scale bounds in constraints; bound values of 0 are unchanged for constraint in conv_opt_model.constraints: # this 'if' will always be true for properly formed constraints if isinstance(constraint.lower_bound, (int, float)): 
constraint.lower_bound *= dfba_bound_scale_factor if isinstance(constraint.upper_bound, (int, float)): constraint.upper_bound *= dfba_bound_scale_factor # scale stoichiometric coefficient of dfba objective reactions for rxn in self._dfba_obj_reactions.values(): rxn_variable = [i for i in conv_opt_model.variables if i.name==rxn.id][0] for part in rxn.dfba_obj_species: # scale stoichiometric coefficient in steady-state constraint steady_state_const = [i for i in conv_opt_model.constraints if i.name==part.species.id][0] lin_terms = steady_state_const.terms obj_rxn_term = [i for i in lin_terms if i.variable==rxn_variable][0] obj_rxn_term.coefficient *= dfba_coef_scale_factor # scale stoichiometric coefficient in negative population constraint constraint_id = DfbaSubmodel.gen_neg_species_pop_constraint_id(part.species.id) neg_pop_const = [i for i in conv_opt_model.constraints if i.name==constraint_id] if neg_pop_const: lin_terms = neg_pop_const[0].terms obj_rxn_term = [i for i in lin_terms if i.variable==rxn_variable][0] obj_rxn_term.coefficient *= dfba_coef_scale_factor return conv_opt_model def unscale_conv_opt_solution(self, dfba_bound_scale_factor=None, dfba_coef_scale_factor=None): """ Remove scaling factors from a `conv_opt` model solution Args: dfba_bound_scale_factor (:obj:`float`, optional): factor used to scale reaction and constraint bounds; if not supplied, is taken from `self.dfba_solver_options` dfba_coef_scale_factor (:obj:`float`, optional): factor used to scale the stoichiometric coefficients in dFBA objective reactions; if not supplied, is taken from `self.dfba_solver_options` """ if dfba_bound_scale_factor is None: dfba_bound_scale_factor = self.dfba_solver_options['dfba_bound_scale_factor'] if dfba_coef_scale_factor is None: dfba_coef_scale_factor = self.dfba_solver_options['dfba_coef_scale_factor'] for rxn_variable in self._conv_model.variables: if rxn_variable.name in self._dfba_obj_reactions: self.reaction_fluxes[rxn_variable.name] /= 
(dfba_bound_scale_factor / dfba_coef_scale_factor) else: self.reaction_fluxes[rxn_variable.name] /= dfba_bound_scale_factor self._optimal_obj_func_value /= (dfba_bound_scale_factor / dfba_coef_scale_factor) def save_fba_solution(self, conv_opt_model, conv_opt_solution): """ Assign a FBA solution to local variables Args: conv_opt_model (:obj:`conv_opt.Model`): the convex optimization model that was solved conv_opt_solution (:obj:`conv_opt.Result`): the model's solution """ self._optimal_obj_func_value = conv_opt_solution.value for rxn_variable in conv_opt_model.variables: self.reaction_fluxes[rxn_variable.name] = rxn_variable.primal def run_fba_solver(self): """ Run the FBA solver for one time step Raises: :obj:`DynamicMultiAlgorithmError`: if no optimal solution is found """ self.determine_bounds() self.update_bounds() # print('\n--- WC Sim dFBA conv opt model ---') # print(ShowConvOptElements.show_conv_opt_model(self.get_conv_model())) # scale just before solving scaled_conv_opt_model = self.scale_conv_opt_model(self.get_conv_model()) if self._model_dumps % 100 == 0: print('\n--- Scaled WC Sim dFBA conv opt model ---') print(ShowConvOptElements.show_conv_opt_model(scaled_conv_opt_model)) print('--- END Scaled WC Sim dFBA conv opt model ---\n') # Set options for conv_opt solver options = conv_opt.SolveOptions( solver=conv_opt.Solver[self.dfba_solver_options['solver']], presolve=conv_opt.Presolve[self.dfba_solver_options['presolve']], solver_options=self.dfba_solver_options['solver_options'] ) if self._model_dumps % 100 == 0: # Set options for conv_opt solver options = conv_opt.SolveOptions( solver=conv_opt.Solver[self.dfba_solver_options['solver']], presolve=conv_opt.Presolve[self.dfba_solver_options['presolve']], verbosity=conv_opt.Verbosity[self.dfba_solver_options['verbosity']], solver_options=self.dfba_solver_options['solver_options'] ) # solve optimization model result = scaled_conv_opt_model.solve(options=options) end_time = self.time + self.time_step if 
result.status_code != conv_opt.StatusCode(0): raise DynamicMultialgorithmError(self.time, f"DfbaSubmodel {self.id}: " f"No optimal solution found: " f"'{result.status_message}' " f"for time step [{self.time}, {end_time}]") # save and unscale the solution self.save_fba_solution(scaled_conv_opt_model, result) self.unscale_conv_opt_solution() if self._model_dumps % 100 == 0: print() print(f'--- time {self.time}: solution ---') non_zero_fluxes = [f for f in self.reaction_fluxes.values() if 0 < f] print(f'{len(non_zero_fluxes)} non-zero reaction fluxes') for rxn_id, flux in self.reaction_fluxes.items(): if 0 < flux: print(f"{rxn_id:<20} {flux:>10.2g}") print(f"objective {self._optimal_obj_func_value:>10.2g}") # Compute the population change rates self.compute_population_change_rates() ### store results in local_species_population ### self.local_species_population.adjust_continuously(self.time, self.id, self.adjustments, time_step=self.time_step) # flush expressions that depend on species and reactions modeled by this dFBA submodel from cache # TODO (APG): OPTIMIZE DFBA CACHING: minimize flushing by implementing OPTIMIZE DFBA CACHING todos elsewhere self.dynamic_model.continuous_submodel_flush_after_populations_change(self.id) self._model_dumps += 1 ### handle DES events ### def handle_RunFba_msg(self, event): """ Handle an event containing a RunFba message Args: event (:obj:`Event`): a simulation event """ self.run_fba_solver() self.schedule_next_periodic_analysis() # TODO (APG): in conv. opt. model output: cleanup; unittests; docstrings; move to conv_opt, etc. # TODO (APG): report on values that are "not a double precision number (NaN)" # TODO (APG): move to conv. opt. 
# package


class ObjToRow(object):
    """ Format objects as rows of a fixed-width text table

    Attributes:
        col_widths (:obj:`tuple` of :obj:`int`): width of each column, in characters
        headers (:obj:`list` of :obj:`tuple` or :obj:`None`): rows of column headers;
            :obj:`None` when the table has no headers
        attrs (:obj:`tuple` of :obj:`str`): names of the attributes rendered in each column
    """

    def __init__(self, col_widths, headers, attrs):
        self.col_widths = col_widths
        self.headers = headers
        self.attrs = attrs

    def header_rows(self):
        """ Render the table's header rows

        Returns:
            :obj:`list` of :obj:`str`: the formatted header rows; empty if no headers were provided
        """
        rv = []
        # guard against headers=None, used by tables without headers;
        # previously requesting such a table's header raised a TypeError
        if self.headers is None:
            return rv
        for header_row in self.headers:
            row = ''
            for col_header, width in zip(header_row, self.col_widths):
                row += f'{col_header:<{width}}'
            rv.append(row)
        return rv

    def obj_as_row(self, obj):
        """ Render one object as a table row

        Args:
            obj (:obj:`object`): an object that has the attributes named in `self.attrs`

        Returns:
            :obj:`str`: `obj`'s attributes, formatted as one fixed-width row
        """
        row = ''
        for attr, width in zip(self.attrs, self.col_widths):
            value = getattr(obj, attr)
            # enums show their name; numbers are right-aligned; anything else is str()'ed
            # note: the Enum test precedes the int test because bool is a subclass of int
            if isinstance(value, enum.Enum):
                str_value = f'{value.name:<{width-1}}'
            elif isinstance(value, float):
                str_value = f'{value:>{width-1}.2E} '
            elif isinstance(value, int):
                str_value = f'{value:>{width-1}d} '
            else:
                str_value = f'{str(value):<{width-1}} '
            row += str_value
        return row


class ShowConvOptElements(object):
    """ Convert `conv_opt` model elements into readable, fixed-width table text """

    @staticmethod
    def show_conv_opt_variable(header=False, variable=None):
        """ Format a `conv_opt` variable, or the variable table's header rows

        Args:
            header (:obj:`bool`, optional): if :obj:`True`, return the header rows
            variable (:obj:`conv_opt.Variable`, optional): the variable to format

        Returns:
            :obj:`list` of :obj:`str` or :obj:`str`: header rows, or one formatted row
        """
        headers_1 = ('name', 'type', 'lower', 'upper')
        headers_2 = ('', '', 'bound', 'bound',)
        variable_to_row = ObjToRow((18, 18, 14, 14,),
                                   [headers_1, headers_2],
                                   ('name', 'type', 'lower_bound', 'upper_bound'))
        if header:
            return variable_to_row.header_rows()
        return variable_to_row.obj_as_row(variable)

    @staticmethod
    def show_conv_opt_constraint(header=False, constraint=None):
        """ Format a `conv_opt` constraint, or the constraint table's header rows

        Args:
            header (:obj:`bool`, optional): if :obj:`True`, return the header rows
            constraint (:obj:`conv_opt.Constraint`, optional): the constraint to format

        Returns:
            :obj:`list` of :obj:`str` or :obj:`str`: header rows, or one formatted row
        """
        # constraints: skip 'dual' which isn't used
        headers_1 = ('name', 'lower', 'upper')
        headers_2 = ('', 'bound', 'bound',)
        constraint_to_row = ObjToRow((22, 10, 10,),
                                     [headers_1, headers_2],
                                     ('name', 'lower_bound', 'upper_bound'))
        if header:
            return constraint_to_row.header_rows()
        return constraint_to_row.obj_as_row(constraint)

    @staticmethod
    def show_conv_opt_variable_term(header=False, variable_term=None):
        """ Format a variable referenced by a `conv_opt` linear term

        Args:
            header (:obj:`bool`, optional): if :obj:`True`, return the header rows (none exist)
            variable_term (:obj:`conv_opt.Variable`, optional): the variable to format

        Returns:
            :obj:`list` of :obj:`str` or :obj:`str`: header rows (empty), or one formatted row
        """
        # I presume that the lower and upper bounds in constraint terms are ignored
        variable_term_to_row = ObjToRow((18, 18), None, ('name', 'type'))
        if header:
            return variable_term_to_row.header_rows()
        return variable_term_to_row.obj_as_row(variable_term)

    @classmethod
    def show_conv_opt_constraints(cls, constraints):
        """ Convert a map of `conv_opt` constraints into a readable representation

        Args:
            constraints (:obj:`dict` of :obj:`str`: :obj:`conv_opt.Constraint`): constraints,
                keyed by id

        Returns:
            :obj:`str`: a readable representation of `constraints`
        """
        rows = ['']
        rows.extend(cls.show_conv_opt_constraint(header=True))
        for _, constraint in constraints.items():
            rows.append(cls.show_conv_opt_constraint(constraint=constraint))
            rows.append('--- terms ---')
            for linear_term in constraint.terms:
                row = f'{linear_term.coefficient:<8}'
                row += cls.show_conv_opt_variable_term(variable_term=linear_term.variable)
                rows.append(row)
        rows.append('')
        return '\n'.join(rows)

    @classmethod
    def show_conv_opt_model(cls, conv_opt_model):
        """ Convert a `conv_opt` model into a readable representation

        Args:
            conv_opt_model (:obj:`conv_opt.Model`): a convex optimization model

        Returns:
            :obj:`str`: a readable representation of `conv_opt_model`
        """
        conv_opt_model_rows = ['']
        conv_opt_model_rows.append('--- conf_opt model ---')
        conv_opt_model_rows.append(f"name: '{conv_opt_model.name}'")

        # variables: skip 'primal', 'reduced_cost', which aren't used
        conv_opt_model_rows.append('')
        conv_opt_model_rows.append('--- variables ---')
        conv_opt_model_rows.extend(cls.show_conv_opt_variable(header=True))
        for variable in conv_opt_model.variables:
            conv_opt_model_rows.append(cls.show_conv_opt_variable(variable=variable))

        # linear terms include Variable as a field, so just print the coefficient directly
        conv_opt_model_rows.append('')
        conv_opt_model_rows.append('--- constraints ---')
        conv_opt_model_rows.extend(cls.show_conv_opt_constraint(header=True))
        for constraint in conv_opt_model.constraints:
            conv_opt_model_rows.append(cls.show_conv_opt_constraint(constraint=constraint))
            conv_opt_model_rows.append('--- terms ---')
            for linear_term in constraint.terms:
                row = f'{linear_term.coefficient:<8}'
                row += cls.show_conv_opt_variable_term(variable_term=linear_term.variable)
                conv_opt_model_rows.append(row)

        conv_opt_model_rows.append('')
        conv_opt_model_rows.append(f'objective direction: {conv_opt_model.objective_direction.name}')
        conv_opt_model_rows.append('')
        conv_opt_model_rows.append('--- objective terms ---')
        for objective_term in conv_opt_model.objective_terms:
            row = f'{objective_term.coefficient:<8}'
            row += cls.show_conv_opt_variable_term(variable_term=objective_term.variable)
            conv_opt_model_rows.append(row)
        return '\n'.join(conv_opt_model_rows)
""" A submodel that uses Dynamic Flux Balance Analysis (dFBA) to model a set of reactions :Author: Yin Hoon Chew <yinhoon.chew@mssm.edu> :Author: Arthur Goldberg <Arthur.Goldberg@mssm.edu> :Date: 2020-07-29 :Copyright: 2016-2020, Karr Lab :License: MIT """ import collections import conv_opt import copy import enum import itertools import math import scipy.constants import wc_lang from wc_sim import message_types from wc_sim.config import core as config_core_multialgorithm from wc_sim.multialgorithm_errors import DynamicMultialgorithmError, MultialgorithmError from wc_sim.submodels.dynamic_submodel import ContinuousTimeSubmodel class DfbaSubmodel(ContinuousTimeSubmodel): """ Use dynamic Flux Balance Analysis to predict the dynamics of chemical species in a container Attributes: DFBA_BOUND_SCALE_FACTOR (:obj:`float`): scaling factor for the bounds on reactions and constraints that avoid negative species populations; the default value is 1. DFBA_COEF_SCALE_FACTOR (:obj:`float`): scaling factor for the stoichiometric coefficients in dFBA objective reactions; the default value is 1. 
SOLVER (:obj:`str`): name of the selected solver in conv_opt, the default value is 'cplex' PRESOLVE (:obj:`str`): presolve mode in `conv_opt` ('auto', 'on', 'off'), the default value is 'on' SOLVER_OPTIONS (:obj:`dict`): parameters for the solver; default values are provided for 'cplex' OPTIMIZATION_TYPE (:obj:`str`): direction of optimization ('maximize', 'max', 'minimize', 'min'); the default value is 'maximize' FLUX_BOUNDS_VOLUMETRIC_COMPARTMENT_ID (:obj:`str`): id of the compartment to which the measured flux bounds are normalized, the default is the whole-cell VERBOSITY (:obj:`str`): output verbosity of the solver NEG_POP_CONSTRAINTS (:obj:`boolean`): whether the constraints that prevent negative species over the next time-step should be used; defaults to :obj:`True` dfba_solver_options (:obj:`dict` of :obj:`str`: :obj:`any`): options for solving dFBA submodel reaction_fluxes (:obj:`dict` of :obj:`str`: :obj:`float`): reaction fluxes data structure, which is pre-allocated dfba_obj_expr (:obj:`ParsedExpression`): an analyzed and validated dFBA objective expression exchange_rxns (:obj:`set` of :obj:`wc_lang.Reactions`): set of exchange and demand reactions _multi_reaction_constraints (:obj:`dict` of :obj:`str`: :obj:`conv_opt.Constraint`): a map from constraint id to constraints that avoid negative species populations in `self._conv_model.constraints` _conv_model (:obj:`conv_opt.Model`): linear programming model in `conv_opt` format _conv_variables (:obj:`dict` of :obj:`str`: :obj:`conv_opt.Variable`): a dictionary mapping reaction IDs to their associated `conv_opt.Variable` objects _conv_metabolite_matrices (:obj:`dict` of :obj:`str`: :obj:`list`): a dictionary mapping metabolite species IDs to lists of :obj:`conv_opt.LinearTerm` objects; each :obj:`conv_opt.LinearTerm` associates a reaction that the species participates in with the species' stoichiometry in the reaction _dfba_obj_reactions (:obj:`dict` of :obj:`str`: :obj:`wc_lang.DfbaObjReaction`): all 
:obj:`wc_lang.DfbaObjReaction`\ s used by the :obj:`self.dfba_obj_expr` _dfba_obj_species (:obj:`list` of :obj:`wc_lang.DfbaObjSpecies:`): all species in :obj:`DfbaObjReaction`\ s used by `dfba_obj_expr`, keyed by their IDs _reaction_bounds (:obj:`dict` of :obj:`str`: :obj:`tuple`): a dictionary that maps reaction IDs to (minimum bound, maximum bound) tuples _optimal_obj_func_value (:obj:`float`): the value of objective function returned by the solver """ # default options DFBA_BOUND_SCALE_FACTOR = 1. DFBA_COEF_SCALE_FACTOR = 1. SOLVER = 'cplex' PRESOLVE = 'on' SOLVER_OPTIONS = { 'cplex': { 'parameters': { 'emphasis': { 'numerical': 1, }, 'read': { 'scale': 1, }, }, }, } OPTIMIZATION_TYPE = 'maximize' VERBOSITY = 'off' FLUX_BOUNDS_VOLUMETRIC_COMPARTMENT_ID = 'wc' NEG_POP_CONSTRAINTS = True # register the message types sent by DfbaSubmodel messages_sent = [message_types.RunFba] # register 'handle_RunFba_msg' to handle RunFba events event_handlers = [(message_types.RunFba, 'handle_RunFba_msg')] time_step_message = message_types.RunFba def __init__(self, id, dynamic_model, reactions, species, dynamic_compartments, local_species_population, dfba_time_step, options=None): """ Initialize a dFBA submodel instance Args: id (:obj:`str`): unique id of this dFBA submodel dynamic_model (:obj: `DynamicModel`): the simulation's central coordinator reactions (:obj:`list` of `wc_lang.Reaction`): the reactions modeled by this dFBA submodel species (:obj:`list` of `wc_lang.Species`): the species that participate in the reactions modeled by this dFBA submodel dynamic_compartments (:obj: `dict`): `DynamicCompartment`s, keyed by id, that contain species which participate in reactions that this dFBA submodel models, including adjacent compartments used by its transfer reactions local_species_population (:obj:`LocalSpeciesPopulation`): the store that maintains this dFBA submodel's species population dfba_time_step (:obj:`float`): time interval between FBA optimization options 
(:obj:`dict`, optional): dFBA submodel options Raises: :obj:`MultiAlgorithmError`: if the `dynamic_dfba_objective` cannot be found, or if some reactions are reversible, or if the provided 'dfba_bound_scale_factor' in options does not have a positive value, or if the provided 'dfba_coef_scale_factor' in options does not have a positive value, or if the provided 'solver' in options is not a valid value, or if the provided 'presolve' in options is not a valid value, or if the 'solver' value provided in the 'solver_options' in options is not the same as the name of the selected `conv_opt.Solver`, or if the provided 'flux_bounds_volumetric_compartment_id' is not a valid compartment ID in the model """ super().__init__(id, dynamic_model, reactions, species, dynamic_compartments, local_species_population, dfba_time_step, options) self.dfba_solver_options = { 'dfba_bound_scale_factor': self.DFBA_BOUND_SCALE_FACTOR, 'dfba_coef_scale_factor': self.DFBA_COEF_SCALE_FACTOR, 'solver': self.SOLVER, 'presolve': self.PRESOLVE, 'solver_options': self.SOLVER_OPTIONS, 'optimization_type': self.OPTIMIZATION_TYPE, 'flux_bounds_volumetric_compartment_id': self.FLUX_BOUNDS_VOLUMETRIC_COMPARTMENT_ID, 'verbosity': self.VERBOSITY, 'negative_pop_constraints': self.NEG_POP_CONSTRAINTS } if options is not None: if 'dfba_bound_scale_factor' in options: if options['dfba_bound_scale_factor'] <= 0.: raise MultialgorithmError(f"DfbaSubmodel {self.id}: dfba_bound_scale_factor must" f" be larger than zero but is {options['dfba_bound_scale_factor']}") self.dfba_solver_options['dfba_bound_scale_factor'] = options['dfba_bound_scale_factor'] if 'dfba_coef_scale_factor' in options: if options['dfba_coef_scale_factor'] <= 0.: raise MultialgorithmError(f"DfbaSubmodel {self.id}: dfba_coef_scale_factor must" f" be larger than zero but is {options['dfba_coef_scale_factor']}") self.dfba_solver_options['dfba_coef_scale_factor'] = options['dfba_coef_scale_factor'] if 'solver' in options: if options['solver'] not 
in conv_opt.Solver.__members__: raise MultialgorithmError(f"DfbaSubmodel {self.id}: {options['solver']}" f" is not a valid Solver") self.dfba_solver_options['solver'] = options['solver'] if 'presolve' in options: if options['presolve'] not in conv_opt.Presolve.__members__: raise MultialgorithmError(f"DfbaSubmodel {self.id}: {options['presolve']}" f" is not a valid Presolve option") self.dfba_solver_options['presolve'] = options['presolve'] if 'solver_options' in options: if self.dfba_solver_options['solver'] not in options['solver_options']: raise MultialgorithmError(f"DfbaSubmodel {self.id}: the solver key in" f" solver_options is not the same as the selected solver" f" '{self.dfba_solver_options['solver']}'") self.dfba_solver_options['solver_options'] = options['solver_options'] if 'optimization_type' in options: if options['optimization_type'] not in conv_opt.ObjectiveDirection.__members__: raise MultialgorithmError(f"DfbaSubmodel {self.id}: the optimization_type in" f" options can only take 'maximize', 'max', 'minimize' or 'min' as value but is" f" '{options['optimization_type']}'") self.dfba_solver_options['optimization_type'] = options['optimization_type'] if 'flux_bounds_volumetric_compartment_id' in options: comp_id = options['flux_bounds_volumetric_compartment_id'] if comp_id != self.FLUX_BOUNDS_VOLUMETRIC_COMPARTMENT_ID: try: flux_bound_comp = self.dynamic_model.dynamic_compartments[comp_id] except: raise MultialgorithmError(f"DfbaSubmodel {self.id}: the user-provided" f" flux_bounds_volumetric_compartment_id '{comp_id}' is not the ID" f" of a compartment in the model") self.dfba_solver_options['flux_bounds_volumetric_compartment_id'] = comp_id if 'verbosity' in options: if options['verbosity'] not in conv_opt.Verbosity.__members__: raise MultialgorithmError(f"DfbaSubmodel {self.id}: the verbosity in" f" options must be one of {set(conv_opt.Verbosity.__members__.keys())} but is" f" '{options['verbosity']}'") self.dfba_solver_options['verbosity'] = 
options['verbosity'] if 'negative_pop_constraints' in options: self.dfba_solver_options['negative_pop_constraints'] = options['negative_pop_constraints'] # ensure that all reactions are irreversible errors = [] for rxn in self.reactions: if rxn.reversible: errors.append(rxn.id) if errors: rxn_ids = ', '.join(errors) raise MultialgorithmError(f"DfbaSubmodel {self.id}: reactions are reversible: {rxn_ids}") # determine set of exchange reactions, each of which has only one participant # TODO (APG): should we use the exchange reaction pattern from the config instead? self.exchange_rxns = set() for rxn in self.reactions: if len(rxn.participants) == 1: self.exchange_rxns.add(rxn) # get the dfba objective's expression dfba_objective_id = f'dfba-obj-{id}' if dfba_objective_id not in dynamic_model.dynamic_dfba_objectives: # pragma: no cover raise MultialgorithmError(f"DfbaSubmodel '{self.id}': cannot find dynamic_dfba_objective " f"{dfba_objective_id}") self.dfba_obj_expr = dynamic_model.dynamic_dfba_objectives[dfba_objective_id].wc_lang_expression # collect all wc_lang.DfbaObjReactions used by dfba_obj_expr self._dfba_obj_reactions = {} for rxn_cls in self.dfba_obj_expr.related_objects: if issubclass(rxn_cls, wc_lang.DfbaObjReaction): for rxn in self.dfba_obj_expr.related_objects[rxn_cls].values(): self._dfba_obj_reactions[rxn.id] = rxn # ensure that the dfba objective doesn't contain exchange rxns errors = [] for rxn_cls in self.dfba_obj_expr.related_objects: for rxn in self.dfba_obj_expr.related_objects[rxn_cls].values(): if rxn in self.exchange_rxns: errors.append(rxn.id) if errors: rxns = ', '.join(errors) raise MultialgorithmError(f"the dfba objective '{dfba_objective_id}' " f"uses exchange reactions: {rxns}") # TODO (APG): warn if species in dFBA objective reactions aren't used by metabolic reactions # log initialization data self.log_with_time("init: id: {}".format(id)) self.log_with_time("init: time_step: {}".format(str(dfba_time_step))) self.set_up_dfba_submodel() 
self.set_up_optimizations() self._model_dumps = 0 def set_up_dfba_submodel(self): """ Set up a dFBA submodel, by converting to a linear programming matrix Raises: :obj:`MultiAlgorithmError`: if the ids in :obj:`DfbaObjReaction`\ s and :obj:`Reactions`\ s intersect """ self.set_up_continuous_time_submodel() ### dFBA specific code ### # raise an error if the ids in DfbaObjReactions and Reactions intersect reaction_ids = set([rxn.id for rxn in self.reactions]) dfba_obj_reaction_ids = set(self._dfba_obj_reactions) if reaction_ids & dfba_obj_reaction_ids: raise MultialgorithmError(f"in model {self.dynamic_model.id} the ids in DfbaObjReactions " f"and Reactions intersect: {reaction_ids & dfba_obj_reaction_ids}") # TODO (APG): later: support colliding ids by creating unique ids prefixed by class # Formulate the optimization problem using the conv_opt package self._conv_model = conv_opt.Model(name='model') self._conv_variables = {} self._conv_metabolite_matrices = collections.defaultdict(list) for rxn in self.reactions: self._conv_variables[rxn.id] = conv_opt.Variable( name=rxn.id, type=conv_opt.VariableType.continuous) self._conv_model.variables.append(self._conv_variables[rxn.id]) for part in rxn.participants: self._conv_metabolite_matrices[part.species.id].append( conv_opt.LinearTerm(self._conv_variables[rxn.id], part.coefficient)) self._dfba_obj_species = [] for rxn in self._dfba_obj_reactions.values(): self._conv_variables[rxn.id] = conv_opt.Variable( name=rxn.id, type=conv_opt.VariableType.continuous, lower_bound=0.) 
self._conv_model.variables.append(self._conv_variables[rxn.id]) for part in rxn.dfba_obj_species: self._dfba_obj_species.append(part) self._conv_metabolite_matrices[part.species.id].append( conv_opt.LinearTerm(self._conv_variables[rxn.id], part.value)) # Set up the objective function errors = [] dfba_obj_expr_objs = self.dfba_obj_expr.lin_coeffs for rxn_cls in dfba_obj_expr_objs.values(): for rxn, lin_coef in rxn_cls.items(): if math.isnan(lin_coef): errors.append(rxn.id) self._conv_model.objective_terms.append(conv_opt.LinearTerm( self._conv_variables[rxn.id], lin_coef)) if errors: rxn_ids = ', '.join(errors) raise MultialgorithmError(f"objective function not linear: reaction(s) have NaN coefficient(s) " f"in its expression: {rxn_ids}") self._conv_model.objective_direction = \ conv_opt.ObjectiveDirection[self.dfba_solver_options['optimization_type']] self._multi_reaction_constraints = self.initialize_neg_species_pop_constraints() def get_conv_model(self): """ Get the `conv_opt` model Returns: :obj:`conv_opt.Model`: the linear programming model in `conv_opt` format """ return self._conv_model @staticmethod def _get_species_and_stoichiometry(reaction): """ Get a reaction's species and their net stoichiometries Handles both :obj:`wc_lang.Reaction` and :obj:`wc_lang.DfbaObjReaction` reactions. Args: reaction (:obj:`wc_lang.Reaction` or :obj:`wc_lang.DfbaObjReaction`): a reaction Returns: :obj:`dict`: map from species to net stoichiometry for each species in `reaction`, with entries which have net stoichiometry of 0. 
removed """ # get net coefficients, since a species can participate in both sides of a reaction species_net_coefficients = collections.defaultdict(float) if isinstance(reaction, wc_lang.Reaction): for part in reaction.participants: species_net_coefficients[part.species] += part.coefficient # pragma: no cover false branch in this elif; ignore missing branch coverage report elif isinstance(reaction, wc_lang.DfbaObjReaction): for part in reaction.dfba_obj_species: species_net_coefficients[part.species] += part.value species_to_rm = [s for s in species_net_coefficients if species_net_coefficients[s] == 0.] for species in species_to_rm: del species_net_coefficients[species] return species_net_coefficients NEG_POP_CONSTRAINT_PREFIX = 'neg_pop_constr' NEG_POP_CONSTRAINT_SEP = '__' LB = '__LB__' RB = '__RB__' @staticmethod def species_id_without_brkts(species_id): """ Replace brackets in a species id with codes Args: species_id (:obj:`str`): WC Lang species id Returns: :obj:`str`: species id with brackets replaced by codes Raises: :obj:`MultiAlgorithmError`: if `species_id` isn't a properly formatted :obj:`wc_lang.Species` id, or has bracket codes """ try: wc_lang.Species.parse_id(species_id) except ValueError as e: raise MultialgorithmError(e) if DfbaSubmodel.LB in species_id or DfbaSubmodel.RB in species_id: raise MultialgorithmError(f"species_id '{species_id}' already has bracket code(s)") return species_id.replace('[', DfbaSubmodel.LB).replace(']', DfbaSubmodel.RB) @staticmethod def species_id_with_brkts(species_id): """ Replace codes in a species id with brackets Args: species_id (:obj:`str`): WC Lang species id with brackets replaced by codes Returns: :obj:`str`: standard WC Lang species id Raises: :obj:`MultiAlgorithmError`: if `species_id` doesn't have bracket codes or has brackets """ if (DfbaSubmodel.LB not in species_id or DfbaSubmodel.RB not in species_id or '[' in species_id or ']' in species_id): raise MultialgorithmError(f"invalid species_id with bracket 
codes '{species_id}' it should be " f"species_type_id{DfbaSubmodel.LB}compartment_id{DfbaSubmodel.RB}") return species_id.replace(DfbaSubmodel.LB, '[').replace(DfbaSubmodel.RB, ']') @staticmethod def gen_neg_species_pop_constraint_id(species_id): """ Generate a negative species population constraint id Args: species_id (:obj:`str`): id of species being constrained Returns: :obj:`str`: a negative species population constraint id """ return DfbaSubmodel.NEG_POP_CONSTRAINT_PREFIX + DfbaSubmodel.NEG_POP_CONSTRAINT_SEP + species_id @staticmethod def parse_neg_species_pop_constraint_id(neg_species_pop_constraint_id): """ Parse a negative species population constraint id Args: neg_species_pop_constraint_id (:obj:`str`): a negative species population constraint id Returns: :obj:`str`: id of species being constrained """ loc = len(DfbaSubmodel.NEG_POP_CONSTRAINT_PREFIX) + len(DfbaSubmodel.NEG_POP_CONSTRAINT_SEP) return neg_species_pop_constraint_id[loc:] def initialize_neg_species_pop_constraints(self): """ Make constraints that prevent species populations from going negative A separate constraint is made for each species. These constraints prevent the species population from declining so quickly that it becomes negative in the next time step. Call this when a dFBA submodel is initialized. Do nothing if negative species population constraints are not being used. 
Returns: :obj:`dict` of :obj:`str`: :obj:`conv_opt.Constraint`: a map from constraint id to constraints stored in `self._conv_model.constraints` """ if not self.dfba_solver_options['negative_pop_constraints']: return {} # make map from species to pseudo-reactions that use the species reactions_using_species = collections.defaultdict(set) # add dFBA objective reactions and their species for rxn in self._dfba_obj_reactions.values(): for species in self._get_species_and_stoichiometry(rxn): reactions_using_species[species].add(rxn) # add exchange reactions and their species for rxn in self.exchange_rxns: for species in self._get_species_and_stoichiometry(rxn): reactions_using_species[species].add(rxn) multi_reaction_constraints = {} for species, rxns in reactions_using_species.items(): # create an expression for species' rate of change, in molecules / sec # ds/dt for species s is sum(coef * flux) for all rxns that use s constr_expr = [] for rxn in rxns: for rxn_species, net_coef in self._get_species_and_stoichiometry(rxn).items(): if rxn_species == species: constr_expr.append(conv_opt.LinearTerm(self._conv_variables[rxn.id], net_coef)) # optimization: only create a Constraint when the species can be consumed, # which can only occur when some of its net coefficients are negative if any([linear_term.coefficient < 0 for linear_term in constr_expr]): # before solving FBA bound_neg_species_pop_constraints() will set lower_bound of constraint # to the rate at which the amount of species goes to 0 in the next time step # constraint keeps the amount of species >= 0 over the time step constraint_id = DfbaSubmodel.gen_neg_species_pop_constraint_id(species.id) constraint = conv_opt.Constraint(constr_expr, name=constraint_id, lower_bound=None, upper_bound=None) self._conv_model.constraints.append(constraint) multi_reaction_constraints[constraint_id] = constraint return multi_reaction_constraints def set_up_optimizations(self): """ To improve performance, pre-compute and 
pre-allocate some data structures """ self.set_up_continuous_time_optimizations() # pre-allocate dict of reaction fluxes self.reaction_fluxes = {rxn.id: float('NaN') for rxn in self.reactions} # initialize adjustments, the dict that will hold the species population change rates self.adjustments = {} for obj_species in self._dfba_obj_species: self.adjustments[obj_species.species.id] = 0. for exchange_rxn in self.exchange_rxns: self.adjustments[exchange_rxn.participants[0].species.id] = 0. for met_id, expression in self._conv_metabolite_matrices.items(): self._conv_model.constraints.append(conv_opt.Constraint(expression, name=met_id, upper_bound=0.0, lower_bound=0.0)) def get_reaction_fluxes(self): """ Get the reaction fluxes Returns: :obj:`dict` of :obj:`str`: :obj:`float`: reaction fluxes """ return self.reaction_fluxes def determine_bounds(self): """ Determine the minimum and maximum flux bounds for each reaction Bounds provided by rate laws or flux bound constants in the model are written to `self._reaction_bounds`. """ flux_comp_id = self.dfba_solver_options['flux_bounds_volumetric_compartment_id'] if flux_comp_id == self.FLUX_BOUNDS_VOLUMETRIC_COMPARTMENT_ID: flux_comp_volume = self.dynamic_model.cell_volume() else: flux_comp_volume = self.dynamic_model.dynamic_compartments[flux_comp_id].volume() self._reaction_bounds = {} for reaction in self.reactions: # defaults # the default minimum constraint of an irreversible reaction is 0 min_constr = 0. 
            # (fragment) tail of DfbaSubmodel.determine_bounds(), continued from the previous chunk;
            # `reaction`, `min_constr`, and `flux_comp_volume` are bound earlier in that method.
            # None indicates no maximum constraint
            max_constr = None
            # if a rate law is available, use it to compute a max bound
            if reaction.rate_laws:
                max_constr = self.calc_reaction_rate(reaction, use_enabled=False)
            # otherwise use the fixed bounds
            elif reaction.flux_bounds:
                rxn_bounds = reaction.flux_bounds
                # convert model flux bounds (conc./time) into molecule counts/time
                # by multiplying by compartment volume and Avogadro's number
                if isinstance(rxn_bounds.min, (int, float)) and 0 <= rxn_bounds.min:
                    min_constr = rxn_bounds.min * flux_comp_volume * scipy.constants.Avogadro
                # NaN max means "unbounded": leave max_constr as None
                if isinstance(rxn_bounds.max, (int, float)) and not math.isnan(rxn_bounds.max):
                    max_constr = rxn_bounds.max * flux_comp_volume * scipy.constants.Avogadro
            self._reaction_bounds[reaction.id] = (min_constr, max_constr)

    def bound_neg_species_pop_constraints(self):
        """ Update bounds in the negative species population constraints that span multiple reactions

        Update the bounds in each constraint in `self._multi_reaction_constraints` that prevents
        a species from having a negative species population in the next time step.

        Call this before each run of the FBA solver.
        """
        # set bounds in multi-reaction constraints
        for constraint_id, constraint in self._multi_reaction_constraints.items():
            species_id = DfbaSubmodel.parse_neg_species_pop_constraint_id(constraint_id)
            species_pop = self.local_species_population.read_one(self.time, species_id)
            # the fastest allowed net consumption empties the species' population in one time step
            max_allowed_consumption_of_species = species_pop / self.time_step
            # consumption appears as a negative flux sum, hence the negated lower bound
            constraint.lower_bound = -max_allowed_consumption_of_species

    def update_bounds(self):
        """ Update the minimum and maximum bounds of `conv_opt.Variable` based on the values in
        `self._reaction_bounds`
        """
        for rxn_id, (min_constr, max_constr) in self._reaction_bounds.items():
            self._conv_variables[rxn_id].lower_bound = min_constr
            self._conv_variables[rxn_id].upper_bound = max_constr
        # also refresh the multi-reaction negative-population constraints
        self.bound_neg_species_pop_constraints()

    def del_infeasible_rxns(self, conv_opt_model, copy_model=True):
        """ Delete infeasible reactions from a convex optimization model

        Delete reactions with lower bound == upper bound == 0.

        Args:
            conv_opt_model (:obj:`conv_opt.Model`): a convex optimization model
            copy_model (:obj:`boolean`, optional): whether to copy the convex optimization model
                before modifying it; defaults to :obj:`True`

        Returns:
            :obj:`conv_opt.Model`: the convex optimization model with infeasible reactions removed
        """
        # NOTE(review): implementation is unfinished -- the comments below sketch the plan, but
        # nothing is removed and the function lacks the `return` promised by the docstring.
        infeasible_rxns = set()
        # infeasible rxns are the variables with lower bound == upper bound == 0.
        # remove infeasible rxns from the variables
        # remove infeasible rxns from constraints that use them
        # remove constraints that have no terms remaining
        if copy_model:
            conv_opt_model = copy.deepcopy(conv_opt_model)

    def compute_population_change_rates(self):
        """ Compute the rate of change of the populations of species used by this dFBA

        Because FBA obtains a steady-state solution for reaction fluxes, only species that
        participate in the exchange reactions or dFBA objective pseudo-reactions at the edge
        of the FBA network can have non-zero rates of change.

        Updates the existing dict `self.adjustments`.
        """
        # Calculate the adjustment for each species in a pseudo-reaction
        # as the sum over reactions of stoichiometry * reaction flux
        for species_id in self.adjustments:
            self.adjustments[species_id] = 0

        # Compute for exchange species
        # note: only participants[0] is read -- assumes exchange rxns have exactly one participant
        for exchange_rxn in self.exchange_rxns:
            self.adjustments[exchange_rxn.participants[0].species.id] -= \
                exchange_rxn.participants[0].coefficient * self.reaction_fluxes[exchange_rxn.id]

        # Compute for dFBA objective species
        for obj_species in self._dfba_obj_species:
            self.adjustments[obj_species.species.id] -= \
                obj_species.value * self.reaction_fluxes[obj_species.dfba_obj_reaction.id]

    def scale_conv_opt_model(self, conv_opt_model, copy_model=True, dfba_bound_scale_factor=None,
                             dfba_coef_scale_factor=None):
        """ Apply scaling factors to a `conv_opt` model

        Scaling factors can be used to scale the size of bounds and objective reaction
        stoichiometric coefficients to address numerical problems with the linear programming
        solver. They are elements of `dfba_solver_options`. The `dfba_bound_scale_factor` option
        scales the bounds on reactions and constraints that avoid negative species populations.
        The `dfba_coef_scale_factor` scales the stoichiometric coefficients in dFBA objective
        reactions. Scaling is done by this method. Symmetrically, the solution results are
        returned to the scale of the whole-cell model by inverting the consequences of these
        scaling factors. This is done by the `unscale_conv_opt_solution` method.

        Args:
            conv_opt_model (:obj:`conv_opt.Model`): a convex optimization model
            copy_model (:obj:`boolean`, optional): whether to copy the convex optimization model
                before scaling it; defaults to :obj:`True`
            dfba_bound_scale_factor (:obj:`float`, optional): factor used to scale the bounds on
                reactions and constraints that avoid negative species populations; if not
                supplied, is taken from `self.dfba_solver_options`
            dfba_coef_scale_factor (:obj:`float`, optional): factor used to scale the
                stoichiometric coefficients in dFBA objective reactions; if not supplied, is
                taken from `self.dfba_solver_options`

        Returns:
            :obj:`conv_opt.Model`: the scaled convex optimization model
        """
        if copy_model:
            conv_opt_model = copy.deepcopy(conv_opt_model)
        if dfba_bound_scale_factor is None:
            dfba_bound_scale_factor = self.dfba_solver_options['dfba_bound_scale_factor']
        if dfba_coef_scale_factor is None:
            dfba_coef_scale_factor = self.dfba_solver_options['dfba_coef_scale_factor']

        # scale bounds
        # skip non-numeric bounds, such as None
        for variable in conv_opt_model.variables:
            if isinstance(variable.lower_bound, (int, float)):
                variable.lower_bound *= dfba_bound_scale_factor
            if isinstance(variable.upper_bound, (int, float)):
                variable.upper_bound *= dfba_bound_scale_factor

        # scale bounds in constraints; bound values of 0 are unchanged
        for constraint in conv_opt_model.constraints:
            # this 'if' will always be true for properly formed constraints
            if isinstance(constraint.lower_bound, (int, float)):
                constraint.lower_bound *= dfba_bound_scale_factor
            if isinstance(constraint.upper_bound, (int, float)):
                constraint.upper_bound *= dfba_bound_scale_factor

        # scale stoichiometric coefficient of dfba objective reactions
        for rxn in self._dfba_obj_reactions.values():
            # find the conv_opt variable for this objective reaction by name
            rxn_variable = [i for i in conv_opt_model.variables if i.name==rxn.id][0]
            for part in rxn.dfba_obj_species:
                # scale stoichiometric coefficient in steady-state constraint
                steady_state_const = [i for i in conv_opt_model.constraints
                                      if i.name==part.species.id][0]
                lin_terms = steady_state_const.terms
                obj_rxn_term = [i for i in lin_terms if i.variable==rxn_variable][0]
                obj_rxn_term.coefficient *= dfba_coef_scale_factor
                # scale stoichiometric coefficient in negative population constraint,
                # which may not exist for every objective species
                constraint_id = DfbaSubmodel.gen_neg_species_pop_constraint_id(part.species.id)
                neg_pop_const = [i for i in conv_opt_model.constraints if i.name==constraint_id]
                if neg_pop_const:
                    lin_terms = neg_pop_const[0].terms
                    obj_rxn_term = [i for i in lin_terms if i.variable==rxn_variable][0]
                    obj_rxn_term.coefficient *= dfba_coef_scale_factor

        return conv_opt_model

    def unscale_conv_opt_solution(self, dfba_bound_scale_factor=None, dfba_coef_scale_factor=None):
        """ Remove scaling factors from a `conv_opt` model solution

        Inverts the scaling applied by `scale_conv_opt_model` so fluxes and the objective value
        are back on the whole-cell model's scale.

        Args:
            dfba_bound_scale_factor (:obj:`float`, optional): factor used to scale reaction and
                constraint bounds; if not supplied, is taken from `self.dfba_solver_options`
            dfba_coef_scale_factor (:obj:`float`, optional): factor used to scale the
                stoichiometric coefficients in dFBA objective reactions; if not supplied, is
                taken from `self.dfba_solver_options`
        """
        if dfba_bound_scale_factor is None:
            dfba_bound_scale_factor = self.dfba_solver_options['dfba_bound_scale_factor']
        if dfba_coef_scale_factor is None:
            dfba_coef_scale_factor = self.dfba_solver_options['dfba_coef_scale_factor']

        for rxn_variable in self._conv_model.variables:
            if rxn_variable.name in self._dfba_obj_reactions:
                # objective pseudo-reactions were scaled by both factors
                self.reaction_fluxes[rxn_variable.name] /= \
                    (dfba_bound_scale_factor / dfba_coef_scale_factor)
            else:
                self.reaction_fluxes[rxn_variable.name] /= dfba_bound_scale_factor
        self._optimal_obj_func_value /= (dfba_bound_scale_factor / dfba_coef_scale_factor)

    def save_fba_solution(self, conv_opt_model, conv_opt_solution):
        """ Assign a FBA solution to local variables

        Args:
            conv_opt_model (:obj:`conv_opt.Model`): the convex optimization model that was solved
            conv_opt_solution (:obj:`conv_opt.Result`): the model's solution
        """
        self._optimal_obj_func_value = conv_opt_solution.value
        for rxn_variable in conv_opt_model.variables:
            self.reaction_fluxes[rxn_variable.name] = rxn_variable.primal

    def run_fba_solver(self):
        """ Run the FBA solver for one time step

        Determines and applies bounds, scales the model, solves it, unscales the solution, and
        pushes the resulting population change rates into the local species population.

        Raises:
            :obj:`DynamicMultiAlgorithmError`: if no optimal solution is found
        """
        self.determine_bounds()
        self.update_bounds()

        # print('\n--- WC Sim dFBA conv opt model ---')
        # print(ShowConvOptElements.show_conv_opt_model(self.get_conv_model()))

        # scale just before solving
        scaled_conv_opt_model = self.scale_conv_opt_model(self.get_conv_model())
        # periodic debugging dump every 100 solver runs
        if self._model_dumps % 100 == 0:
            print('\n--- Scaled WC Sim dFBA conv opt model ---')
            print(ShowConvOptElements.show_conv_opt_model(scaled_conv_opt_model))
            print('--- END Scaled WC Sim dFBA conv opt model ---\n')

        # Set options for conv_opt solver
        options = conv_opt.SolveOptions(
            solver=conv_opt.Solver[self.dfba_solver_options['solver']],
            presolve=conv_opt.Presolve[self.dfba_solver_options['presolve']],
            solver_options=self.dfba_solver_options['solver_options']
        )
        if self._model_dumps % 100 == 0:
            # Set options for conv_opt solver; verbose variant used on dump iterations
            options = conv_opt.SolveOptions(
                solver=conv_opt.Solver[self.dfba_solver_options['solver']],
                presolve=conv_opt.Presolve[self.dfba_solver_options['presolve']],
                verbosity=conv_opt.Verbosity[self.dfba_solver_options['verbosity']],
                solver_options=self.dfba_solver_options['solver_options']
            )

        # solve optimization model
        result = scaled_conv_opt_model.solve(options=options)
        end_time = self.time + self.time_step
        # StatusCode(0) is the solver's "optimal" status
        if result.status_code != conv_opt.StatusCode(0):
            raise DynamicMultialgorithmError(self.time, f"DfbaSubmodel {self.id}: "
                                                        f"No optimal solution found: "
                                                        f"'{result.status_message}' "
                                                        f"for time step [{self.time}, {end_time}]")

        # save and unscale the solution
        self.save_fba_solution(scaled_conv_opt_model, result)
        self.unscale_conv_opt_solution()

        if self._model_dumps % 100 == 0:
            print()
            print(f'--- time {self.time}: solution ---')
            non_zero_fluxes = [f for f in self.reaction_fluxes.values() if 0 < f]
            print(f'{len(non_zero_fluxes)} non-zero reaction fluxes')
            for rxn_id, flux in self.reaction_fluxes.items():
                if 0 < flux:
                    print(f"{rxn_id:<20} {flux:>10.2g}")
            print(f"objective {self._optimal_obj_func_value:>10.2g}")

        # Compute the population change rates
        self.compute_population_change_rates()

        ### store results in local_species_population ###
        self.local_species_population.adjust_continuously(self.time, self.id, self.adjustments,
                                                          time_step=self.time_step)

        # flush expressions that depend on species and reactions modeled by this dFBA submodel from cache
        # TODO (APG): OPTIMIZE DFBA CACHING: minimize flushing by implementing OPTIMIZE DFBA CACHING todos elsewhere
        self.dynamic_model.continuous_submodel_flush_after_populations_change(self.id)
        self._model_dumps += 1

    ### handle DES events ###
    def handle_RunFba_msg(self, event):
        """ Handle an event containing a RunFba message

        Args:
            event (:obj:`Event`): a simulation event
        """
        self.run_fba_solver()
        self.schedule_next_periodic_analysis()

# TODO (APG): in conv. opt. model output: cleanup; unittests; docstrings; move to conv_opt, etc.
# TODO (APG): report on values that are "not a double precision number (NaN)"
# TODO (APG): move to conv. opt.
# TODO (APG): move to conv. opt. package
class ObjToRow(object):
    """ Render objects as fixed-width text rows

    Formats a sequence of object attributes into one aligned text row, with optional
    multi-line column headers.
    """

    def __init__(self, col_widths, headers, attrs):
        # col_widths: per-column field widths
        self.col_widths = col_widths
        # headers: iterable of header rows (tuples of column titles), or None for no headers
        self.headers = headers
        # attrs: attribute names read from each object, one per column
        self.attrs = attrs

    def header_rows(self):
        """ Return the formatted header rows as a list of strings """
        rv = []
        for header_row in self.headers:
            row = ''
            for col_header, width in zip(header_row, self.col_widths):
                row += f'{col_header:<{width}}'
            rv.append(row)
        return rv

    def obj_as_row(self, obj):
        """ Return one formatted row of `obj`'s attributes listed in `self.attrs` """
        row = ''
        for attr, width in zip(self.attrs, self.col_widths):
            value = getattr(obj, attr)
            # default: left-justified str(); numeric and Enum values get special formats below
            str_value = f'{str(value):<{width-1}} '
            # Enum checked first because some Enum members are also ints
            # NOTE(review): this branch omits the trailing pad space the others add -- confirm
            if isinstance(value, enum.Enum):
                str_value = f'{value.name:<{width-1}}'
            elif isinstance(value, float):
                str_value = f'{value:>{width-1}.2E} '
            elif isinstance(value, int):
                str_value = f'{value:>{width-1}d} '
            row += str_value
        return row


class ShowConvOptElements(object):
    """ Render the parts of a `conv_opt` model as readable fixed-width text """

    @staticmethod
    def show_conv_opt_variable(header=False, variable=None):
        """ Format one variable as a row, or return the header rows when `header` is True """
        headers_1 = ('name', 'type', 'lower', 'upper')
        headers_2 = ('', '', 'bound', 'bound',)
        variable_to_row = ObjToRow((18, 18, 14, 14,),
                                   [headers_1, headers_2],
                                   ('name', 'type', 'lower_bound', 'upper_bound'))
        if header:
            return variable_to_row.header_rows()
        return variable_to_row.obj_as_row(variable)

    @staticmethod
    def show_conv_opt_constraint(header=False, constraint=None):
        """ Format one constraint as a row, or return the header rows when `header` is True """
        # constraints: skip 'dual' which isn't used
        headers_1 = ('name', 'lower', 'upper')
        headers_2 = ('', 'bound', 'bound',)
        constraint_to_row = ObjToRow((22, 10, 10,),
                                     [headers_1, headers_2],
                                     ('name', 'lower_bound', 'upper_bound'))
        if header:
            return constraint_to_row.header_rows()
        return constraint_to_row.obj_as_row(constraint)

    @staticmethod
    def show_conv_opt_variable_term(header=False, variable_term=None):
        """ Format the variable referenced by a linear term """
        # I presume that the lower and upper bounds in constraint terms are ignored
        variable_term_to_row = ObjToRow((18, 18),
                                        None,
                                        ('name', 'type'))
        if header:
            return variable_term_to_row.header_rows()
        return variable_term_to_row.obj_as_row(variable_term)

    @classmethod
    def show_conv_opt_constraints(cls, constraints):
        """ Format a dict of constraints, each followed by its linear terms

        Args:
            constraints (:obj:`dict`): map from constraint id to constraint

        Returns:
            :obj:`str`: a multi-line representation of `constraints`
        """
        rows = ['']
        rows.extend(cls.show_conv_opt_constraint(header=True))
        for id, constraint in constraints.items():
            rows.append(cls.show_conv_opt_constraint(constraint=constraint))
            rows.append('--- terms ---')
            for linear_term in constraint.terms:
                row = f'{linear_term.coefficient:<8}'
                row += cls.show_conv_opt_variable_term(variable_term=linear_term.variable)
                rows.append(row)
        rows.append('')
        return '\n'.join(rows)

    @classmethod
    def show_conv_opt_model(cls, conv_opt_model):
        """ Convert a `conv_opt` into a readable representation

        Args:
            conv_opt_model (:obj:`conv_opt.Model`): a convex optimization model

        Returns:
            :obj:`str`: a readable representation of `conv_opt_model`
        """
        conv_opt_model_rows = ['']
        conv_opt_model_rows.append('--- conf_opt model ---')
        conv_opt_model_rows.append(f"name: '{conv_opt_model.name}'")
        # variables: skip 'primal', 'reduced_cost', which aren't used
        conv_opt_model_rows.append('')
        conv_opt_model_rows.append('--- variables ---')
        conv_opt_model_rows.extend(cls.show_conv_opt_variable(header=True))
        for variable in conv_opt_model.variables:
            conv_opt_model_rows.append(cls.show_conv_opt_variable(variable=variable))

        # linear terms include Variable as a field, so just print the coefficient directly
        conv_opt_model_rows.append('')
        conv_opt_model_rows.append('--- constraints ---')
        conv_opt_model_rows.extend(cls.show_conv_opt_constraint(header=True))
        for constraint in conv_opt_model.constraints:
            conv_opt_model_rows.append(cls.show_conv_opt_constraint(constraint=constraint))
            conv_opt_model_rows.append('--- terms ---')
            for linear_term in constraint.terms:
                row = f'{linear_term.coefficient:<8}'
                row += cls.show_conv_opt_variable_term(variable_term=linear_term.variable)
                conv_opt_model_rows.append(row)

        conv_opt_model_rows.append('')
        conv_opt_model_rows.append(f'objective direction: {conv_opt_model.objective_direction.name}')
        conv_opt_model_rows.append('')
        conv_opt_model_rows.append('--- objective terms ---')
        for objective_term in conv_opt_model.objective_terms:
            row = f'{objective_term.coefficient:<8}'
            row += cls.show_conv_opt_variable_term(variable_term=objective_term.variable)
            conv_opt_model_rows.append(row)
        return '\n'.join(conv_opt_model_rows)
from ipdb import set_trace as st
import os
import time
import random
import numpy as np
from loguru import logger
import torch


def set_seed(seed):
    """ Seed Python, NumPy, and PyTorch RNGs for reproducibility.

    Args:
        seed (int): seed applied to every RNG source
    """
    random.seed(seed)
    np.random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)  # type: ignore
    torch.backends.cudnn.deterministic = True  # type: ignore
    # NOTE(review): benchmark=True lets cuDNN autotune algorithms, which can break strict
    # determinism despite deterministic=True -- confirm this trade-off is intended
    torch.backends.cudnn.benchmark = True  # type: ignore


def get_save_dir_exp(config):
    """ Build the save directories for this experiment from its directory name.

    Args:
        config (dict): expects config['path']['dir_save'] and config['path']['dir_save_ignore']

    Returns:
        tuple: (dir_save_exp, dir_save_ignore_exp, exp_name)
    """
    _dir = os.path.dirname(os.path.abspath(__file__))
    exp_name = _dir.split('/')[-1]
    # FIX: the nested f-string keys reused the outer single quotes
    # (f'{config['path']...'), a SyntaxError on Python < 3.12; use double-quoted keys
    dir_save_exp = f'{config["path"]["dir_save"]}{exp_name}'
    dir_save_ignore_exp = f'{config["path"]["dir_save_ignore"]}{exp_name}'
    return dir_save_exp, dir_save_ignore_exp, exp_name


def mixup_data(x, y, alpha=1.0, use_cuda=True):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    # lambda ~ Beta(alpha, alpha); alpha <= 0 disables mixing
    if alpha > 0:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1

    batch_size = x.size()[0]
    if use_cuda:
        index = torch.randperm(batch_size).cuda()
    else:
        index = torch.randperm(batch_size)

    # convex combination of each sample with a randomly permuted partner
    mixed_x = lam * x + (1 - lam) * x[index, :]
    y_a, y_b = y, y[index]
    return mixed_x, y_a, y_b, lam


def get_debug_idx(trn_tp, trn_idxs, val_idxs, config):
    """ Pick one train and one validation row index per class for quick debug runs.

    Args:
        trn_tp (pd.DataFrame): training metadata with a `species_id` column
        trn_idxs, val_idxs: positional indices of the train/val split
        config (dict): reads config['model']['params']['n_classes']

    Returns:
        tuple: (trn_idxs_debug, val_idxs_debug) with one index per class
    """
    n_classes = config['model']['params']['n_classes']
    trn_tp_trn = trn_tp.iloc[trn_idxs].copy()
    trn_tp_val = trn_tp.iloc[val_idxs].copy()
    trn_tp_trn['idx_'] = trn_idxs
    trn_tp_val['idx_'] = val_idxs
    trn_idxs_debug = []
    val_idxs_debug = []
    for idx in range(n_classes):
        # first occurrence of each class in each split
        bools = trn_tp_trn.species_id == idx
        trn_idxs_debug.append(trn_tp_trn[bools]['idx_'].values[0])
        bools = trn_tp_val.species_id == idx
        val_idxs_debug.append(trn_tp_val[bools]['idx_'].values[0])
    return trn_idxs_debug, val_idxs_debug


def set_debug_config(config):
    """ Shrink epochs, folds, and batch sizes when config['globals']['debug'] is set. """
    if config['globals']['debug']:
        logger.info(':: debug mode ::')
        config['globals']['num_epochs'] = 2
        config['split']['n_fold'] = 2
        config['loader']['train']['batch_size'] = 1
        config['loader']['valid']['batch_size'] = 1
        return config
    else:
        return config


def sec2time(sec):
    """ Format a duration in seconds as a zero-padded 'HH:MM:SS' string. """
    hour = int(sec//3600)
    minute = int((sec - 3600*hour)//60)
    second = int(sec - 3600*hour - 60*minute)
    hour = str(hour).zfill(2)
    minute = str(minute).zfill(2)
    second = str(second).zfill(2)
    str_time = f'{hour}:{minute}:{second}'
    return str_time


def LWLRAP(preds, labels):
    '''
    Label-weighted label-ranking average precision.

    https://github.com/yuki-a4/rfcx-species-audio-detection/blob/main/yuki/notebook/ex_059_resnest_changeLoss_lr_0.15_aug0.3_seed239.ipynb
    '''
    # st()
    # device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    preds, labels = preds.to(device), labels.to(device)

    # Ranks of the predictions
    ranked_classes = torch.argsort(preds, dim=-1, descending=True)
    # i, j corresponds to rank of prediction in row i
    class_ranks = torch.zeros_like(ranked_classes)
    for i in range(ranked_classes.size(0)):
        for j in range(ranked_classes.size(1)):
            class_ranks[i, ranked_classes[i][j]] = j + 1
    # Mask out to only use the ranks of relevant GT labels
    ground_truth_ranks = class_ranks * labels + (1e6) * (1 - labels)
    # All the GT ranks are in front now
    sorted_ground_truth_ranks, _ = torch.sort(ground_truth_ranks, dim=-1, descending=False)
    # Number of GT labels per instance
    # num_labels = labels.sum(-1)
    pos_matrix = torch.tensor(
        np.array([i+1 for i in range(labels.size(-1))])).unsqueeze(0)
    pos_matrix = pos_matrix.to(device)
    sorted_ground_truth_ranks = sorted_ground_truth_ranks.to(device)
    score_matrix = pos_matrix / sorted_ground_truth_ranks
    score_mask_matrix, _ = torch.sort(labels, dim=-1, descending=True)
    scores = score_matrix * score_mask_matrix
    score = scores.sum() / labels.sum()
    return score.item()
from ipdb import set_trace as st
import os
import time
import random
import numpy as np
from loguru import logger
import torch


def set_seed(seed):
    """ Seed Python, NumPy, and PyTorch RNGs for reproducibility. """
    random.seed(seed)
    np.random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)  # type: ignore
    torch.backends.cudnn.deterministic = True  # type: ignore
    # NOTE(review): benchmark=True lets cuDNN autotune algorithms, which can break strict
    # determinism despite deterministic=True -- confirm this trade-off is intended
    torch.backends.cudnn.benchmark = True  # type: ignore


def get_save_dir_exp(config):
    """ Build the save directories for this experiment from its directory name.

    Returns:
        tuple: (dir_save_exp, dir_save_ignore_exp, exp_name)
    """
    _dir = os.path.dirname(os.path.abspath(__file__))
    exp_name = _dir.split('/')[-1]
    dir_save_exp = f'{config["path"]["dir_save"]}{exp_name}'
    dir_save_ignore_exp = f'{config["path"]["dir_save_ignore"]}{exp_name}'
    return dir_save_exp, dir_save_ignore_exp, exp_name


def mixup_data(x, y, alpha=1.0, use_cuda=True):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    # lambda ~ Beta(alpha, alpha); alpha <= 0 disables mixing
    if alpha > 0:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1

    batch_size = x.size()[0]
    if use_cuda:
        index = torch.randperm(batch_size).cuda()
    else:
        index = torch.randperm(batch_size)

    # convex combination of each sample with a randomly permuted partner
    mixed_x = lam * x + (1 - lam) * x[index, :]
    y_a, y_b = y, y[index]
    return mixed_x, y_a, y_b, lam


def get_debug_idx(trn_tp, trn_idxs, val_idxs, config):
    """ Pick one train and one validation row index per class for quick debug runs. """
    n_classes = config['model']['params']['n_classes']
    trn_tp_trn = trn_tp.iloc[trn_idxs].copy()
    trn_tp_val = trn_tp.iloc[val_idxs].copy()
    trn_tp_trn['idx_'] = trn_idxs
    trn_tp_val['idx_'] = val_idxs
    trn_idxs_debug = []
    val_idxs_debug = []
    for idx in range(n_classes):
        # first occurrence of each class in each split
        bools = trn_tp_trn.species_id == idx
        trn_idxs_debug.append(trn_tp_trn[bools]['idx_'].values[0])
        bools = trn_tp_val.species_id == idx
        val_idxs_debug.append(trn_tp_val[bools]['idx_'].values[0])
    return trn_idxs_debug, val_idxs_debug


def set_debug_config(config):
    """ Shrink epochs, folds, and batch sizes when config['globals']['debug'] is set. """
    if config['globals']['debug']:
        logger.info(':: debug mode ::')
        config['globals']['num_epochs'] = 2
        config['split']['n_fold'] = 2
        config['loader']['train']['batch_size'] = 1
        config['loader']['valid']['batch_size'] = 1
        return config
    else:
        return config


def sec2time(sec):
    """ Format a duration in seconds as a zero-padded 'HH:MM:SS' string. """
    hour = int(sec//3600)
    minute = int((sec - 3600*hour)//60)
    second = int(sec - 3600*hour - 60*minute)
    hour = str(hour).zfill(2)
    minute = str(minute).zfill(2)
    second = str(second).zfill(2)
    str_time = f'{hour}:{minute}:{second}'
    return str_time


def LWLRAP(preds, labels):
    '''
    Label-weighted label-ranking average precision.

    https://github.com/yuki-a4/rfcx-species-audio-detection/blob/main/yuki/notebook/ex_059_resnest_changeLoss_lr_0.15_aug0.3_seed239.ipynb
    '''
    # st()
    # device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    preds, labels = preds.to(device), labels.to(device)

    # Ranks of the predictions
    ranked_classes = torch.argsort(preds, dim=-1, descending=True)
    # i, j corresponds to rank of prediction in row i
    class_ranks = torch.zeros_like(ranked_classes)
    for i in range(ranked_classes.size(0)):
        for j in range(ranked_classes.size(1)):
            class_ranks[i, ranked_classes[i][j]] = j + 1
    # Mask out to only use the ranks of relevant GT labels
    ground_truth_ranks = class_ranks * labels + (1e6) * (1 - labels)
    # All the GT ranks are in front now
    sorted_ground_truth_ranks, _ = torch.sort(ground_truth_ranks, dim=-1, descending=False)
    # Number of GT labels per instance
    # num_labels = labels.sum(-1)
    pos_matrix = torch.tensor(
        np.array([i+1 for i in range(labels.size(-1))])).unsqueeze(0)
    pos_matrix = pos_matrix.to(device)
    sorted_ground_truth_ranks = sorted_ground_truth_ranks.to(device)
    score_matrix = pos_matrix / sorted_ground_truth_ranks
    score_mask_matrix, _ = torch.sort(labels, dim=-1, descending=True)
    scores = score_matrix * score_mask_matrix
    score = scores.sum() / labels.sum()
    return score.item()
class Verdict:
    """ Accumulates reconciliation verdict strings together with a reconciled/non-reconciled flag.

    Attributes:
        reconciled: whether the item was reconciled (None means undetermined, rendered as
            'non-reconciled')
        verdicts: list of verdict message strings
    """

    def __init__(self, reconciled: bool=None, verdict: str=None):
        self.reconciled = reconciled
        self.verdicts = []
        if verdict:
            self.verdicts.append(verdict)

    def __str__(self):
        # FIX: the f-string previously reused the outer single quotes inside its replacement
        # fields ({'reconciled' if ...}), a SyntaxError on Python < 3.12
        status = 'reconciled' if self.reconciled else 'non-reconciled'
        # NOTE(review): the representation has no closing '>' -- preserved as-is; confirm intent
        return f'<Verdict / {status} ({"; ".join(self.verdicts)})'
class Verdict:
    """ Holds a reconciliation flag plus any number of verdict message strings. """

    def __init__(self, reconciled: bool=None, verdict: str=None):
        self.reconciled = reconciled
        # seed the verdict list with the optional initial message (falsy values are skipped)
        self.verdicts = [verdict] if verdict else []

    def __str__(self):
        label = 'reconciled' if self.reconciled else 'non-reconciled'
        joined = '; '.join(self.verdicts)
        return f'<Verdict / {label} ({joined})'
"""scrapli.factory""" import importlib from copy import deepcopy from io import BytesIO from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union, cast from scrapli.driver import AsyncGenericDriver, AsyncNetworkDriver, GenericDriver, NetworkDriver from scrapli.driver.core import ( AsyncEOSDriver, AsyncIOSXEDriver, AsyncIOSXRDriver, AsyncJunosDriver, AsyncNXOSDriver, EOSDriver, IOSXEDriver, IOSXRDriver, JunosDriver, NXOSDriver, ) from scrapli.driver.network.base_driver import PrivilegeLevel from scrapli.exceptions import ( ScrapliException, ScrapliModuleNotFound, ScrapliTypeError, ScrapliValueError, ) from scrapli.helper import format_user_warning from scrapli.logging import logger from scrapli.transport import ASYNCIO_TRANSPORTS def _build_provided_kwargs_dict( # pylint: disable=R0914 host: str, privilege_levels: Optional[Dict[str, PrivilegeLevel]], default_desired_privilege_level: Optional[str], port: Optional[int], auth_username: Optional[str], auth_password: Optional[str], auth_private_key: Optional[str], auth_private_key_passphrase: Optional[str], auth_strict_key: Optional[bool], auth_bypass: Optional[bool], timeout_socket: Optional[float], timeout_transport: Optional[float], timeout_ops: Optional[float], comms_return_char: Optional[str], comms_ansi: Optional[bool], ssh_config_file: Optional[Union[str, bool]], ssh_known_hosts_file: Optional[Union[str, bool]], on_init: Optional[Callable[..., Any]], on_open: Optional[Callable[..., Any]], on_close: Optional[Callable[..., Any]], transport: Optional[str], transport_options: Optional[Dict[str, Any]], channel_log: Optional[Union[str, bool, BytesIO]], channel_log_mode: Optional[str], channel_lock: Optional[bool], logging_uid: Optional[str], auth_secondary: Optional[str], failed_when_contains: Optional[List[str]], textfsm_platform: Optional[str], genie_platform: Optional[str], **kwargs: Dict[Any, Any], ) -> Dict[str, Any]: r""" Build arguments dict based on provided inputs This function builds the dict 
of keyword args to unpack and send to the driver -- in the factory context this also needs to convert the arguments that have defaults that evaluate to False (i.e ssh_config_file which defaults to False) from None which is their default in the factory, back to their normal default if they are still None -OR- to whatever the user provided. # noqa: DAR101 Args: N/A Returns: dict: dictionary with user args merged with the appropriate default options Raises: N/A """ # dict of all args coming from the factories _provided_args: Dict[str, Any] = { "host": host, "privilege_levels": privilege_levels, "default_desired_privilege_level": default_desired_privilege_level, "port": port, "auth_username": auth_username, "auth_password": auth_password, "auth_private_key": auth_private_key, "auth_private_key_passphrase": auth_private_key_passphrase, "auth_strict_key": auth_strict_key, "auth_bypass": auth_bypass, "timeout_socket": timeout_socket, "timeout_transport": timeout_transport, "timeout_ops": timeout_ops, "comms_return_char": comms_return_char, "comms_ansi": comms_ansi, "ssh_config_file": ssh_config_file, "ssh_known_hosts_file": ssh_known_hosts_file, "on_init": on_init, "on_open": on_open, "on_close": on_close, "transport": transport, "transport_options": transport_options, "channel_log": channel_log, "channel_log_mode": channel_log_mode, "channel_lock": channel_lock, "logging_uid": logging_uid, "auth_secondary": auth_secondary, "failed_when_contains": failed_when_contains, "textfsm_platform": textfsm_platform, "genie_platform": genie_platform, } # add back in the None/False args _provided_args = {key: value for key, value in _provided_args.items() if value is not None} # merge in any kwargs that maybe need to get passed down all_provided_args = {**_provided_args, **kwargs} return all_provided_args def _get_community_platform_details(community_platform_name: str) -> Dict[str, Any]: """ Fetch community platform details Args: community_platform_name: name of community Returns: 
platform_details: dict of details about community platform from scrapli_community library Raises: ScrapliModuleNotFound: if scrapli_community is not importable ScrapliModuleNotFound: if provided community_platform_name package is not importable ScrapliException: if community platform is missing "SCRAPLI_PLATFORM" attribute """ try: importlib.import_module(name="scrapli_community") except ModuleNotFoundError as exc: title = "Module not found!" message = ( "Scrapli Community package is not installed!\n" "To resolve this issue, install the transport plugin. You can do this in one of " "the following ways:\n" "1: 'pip install -r requirements-community.txt'\n" "2: 'pip install scrapli[community]'" ) warning = format_user_warning(title=title, message=message) raise ScrapliModuleNotFound(warning) from exc try: # replace any underscores in platform name with "."; should support any future platforms # that dont have "child" os types -- i.e. just "cisco" instead of "cisco_iosxe" scrapli_community_platform = importlib.import_module( name=f"scrapli_community.{community_platform_name.replace("_", ".")}" ) except ModuleNotFoundError as exc: title = "Module not found!" message = ( f"Scrapli Community platform '{community_platform_name}` not found!\n" "To resolve this issue, ensure you have the correct platform name, and that a scrapli " " community platform of that name exists!" 
) warning = format_user_warning(title=title, message=message) raise ScrapliModuleNotFound(warning) from exc platform_details_original = getattr(scrapli_community_platform, "SCRAPLI_PLATFORM", {}) if not platform_details_original: msg = "Community platform missing required attribute `SCRAPLI_PLATFORM`" raise ScrapliException(msg) platform_details: Dict[str, Any] = deepcopy(platform_details_original) return platform_details def _get_driver_kwargs( platform_details: Dict[str, Any], variant: Optional[str], _async: bool = False ) -> Dict[str, Any]: """ Parent get driver method Args: platform_details: dict of details about community platform from scrapli_community library variant: optional name of variant of community platform _async: True/False this is for an asyncio transport driver Returns: final_platform_kwargs: dict of final driver kwargs Raises: N/A """ platform_kwargs = platform_details["defaults"] if variant: variant_kwargs = platform_details["variants"][variant] final_platform_kwargs = {**platform_kwargs, **variant_kwargs} else: final_platform_kwargs = platform_kwargs if not _async: # remove unnecessary asyncio things final_platform_kwargs.pop("async_on_open") final_platform_kwargs.pop("async_on_close") # rename sync_on_(open|close) keys to just "on_open"/"on_close" final_platform_kwargs["on_open"] = final_platform_kwargs.pop("sync_on_open") final_platform_kwargs["on_close"] = final_platform_kwargs.pop("sync_on_close") else: # remove unnecessary sync things final_platform_kwargs.pop("sync_on_open") final_platform_kwargs.pop("sync_on_close") # rename sync_on_(open|close) keys to just "on_open"/"on_close" final_platform_kwargs["on_open"] = final_platform_kwargs.pop("async_on_open") final_platform_kwargs["on_close"] = final_platform_kwargs.pop("async_on_close") return final_platform_kwargs class Scrapli(NetworkDriver): CORE_PLATFORM_MAP = { "arista_eos": EOSDriver, "cisco_iosxe": IOSXEDriver, "cisco_iosxr": IOSXRDriver, "cisco_nxos": NXOSDriver, "juniper_junos": 
JunosDriver, } DRIVER_MAP = {"network": NetworkDriver, "generic": GenericDriver} @classmethod def _get_driver_class( cls, platform_details: Dict[str, Any], variant: Optional[str] ) -> Union[Type[NetworkDriver], Type[GenericDriver]]: """ Fetch community driver class based on platform details Args: platform_details: dict of details about community platform from scrapli_community library variant: optional name of variant of community platform Returns: NetworkDriver: final driver class Raises: N/A """ final_driver: Union[ Type[NetworkDriver], Type[GenericDriver], ] if variant and platform_details["variants"][variant].get("driver_type"): variant_driver_data = platform_details["variants"][variant].pop("driver_type") final_driver = variant_driver_data["sync"] return final_driver if isinstance(platform_details["driver_type"], str): driver_type = platform_details["driver_type"] standard_final_driver = cls.DRIVER_MAP.get(driver_type, None) if standard_final_driver: return standard_final_driver final_driver = platform_details["driver_type"]["sync"] return final_driver @classmethod def _get_community_driver( cls, community_platform_name: str, variant: Optional[str] ) -> Tuple[Union[Type[NetworkDriver], Type[GenericDriver]], Dict[str, Any]]: """ Get community driver Args: community_platform_name: name of community variant: optional name of variant of community platform Returns: NetworkDriver: final driver class Raises: N/A """ platform_details = _get_community_platform_details( community_platform_name=community_platform_name ) final_driver = cls._get_driver_class(platform_details=platform_details, variant=variant) final_platform_kwargs = _get_driver_kwargs( platform_details=platform_details, variant=variant, _async=False ) return final_driver, final_platform_kwargs @classmethod def _get_driver( cls, platform: str, variant: Optional[str] ) -> Tuple[Union[Type[NetworkDriver], Type[GenericDriver]], Dict[str, Any]]: """ Parent get driver method for sync Scrapli Args: platform: name 
of target platform; i.e. `cisco_iosxe`, `arista_eos`, etc. variant: name of the target platform variant Returns: NetworkDriver: final driver class; generally NetworkDriver, but for some community platforms could be GenericDriver, also returns any additional kwargs comming from the community platform (if any) Raises: N/A """ additional_kwargs: Dict[str, Any] = {} final_driver: Union[Type[GenericDriver], Type[NetworkDriver]] if platform in cls.CORE_PLATFORM_MAP: final_driver = cls.CORE_PLATFORM_MAP[platform] msg = f"Driver '{final_driver}' selected from scrapli core drivers" else: final_driver, additional_kwargs = cls._get_community_driver( community_platform_name=platform, variant=variant ) msg = ( f"Driver '{final_driver}' selected from scrapli community platforms, with the " f"following platform arguments: '{additional_kwargs}'" ) logger.info(msg) return final_driver, additional_kwargs def __new__( # pylint: disable=R0914 cls, platform: str, host: str, privilege_levels: Optional[Dict[str, PrivilegeLevel]] = None, default_desired_privilege_level: Optional[str] = None, port: Optional[int] = None, auth_username: Optional[str] = None, auth_password: Optional[str] = None, auth_private_key: Optional[str] = None, auth_private_key_passphrase: Optional[str] = None, auth_strict_key: Optional[bool] = None, auth_bypass: Optional[bool] = None, timeout_socket: Optional[float] = None, timeout_transport: Optional[float] = None, timeout_ops: Optional[float] = None, comms_return_char: Optional[str] = None, comms_ansi: Optional[bool] = None, ssh_config_file: Optional[Union[str, bool]] = None, ssh_known_hosts_file: Optional[Union[str, bool]] = None, on_init: Optional[Callable[..., Any]] = None, on_open: Optional[Callable[..., Any]] = None, on_close: Optional[Callable[..., Any]] = None, transport: Optional[str] = None, transport_options: Optional[Dict[str, Any]] = None, channel_log: Optional[Union[str, bool, BytesIO]] = None, channel_lock: Optional[bool] = None, channel_log_mode: 
Optional[str] = None, logging_uid: Optional[str] = None, auth_secondary: Optional[str] = None, failed_when_contains: Optional[List[str]] = None, textfsm_platform: Optional[str] = None, genie_platform: Optional[str] = None, variant: Optional[str] = None, **kwargs: Dict[Any, Any], ) -> "Scrapli": r""" Scrapli Factory method for synchronous drivers Args: platform: name of the scrapli platform to return a connection object for; should be one of the "core" platforms or a valid community platform name host: host ip/name to connect to port: port to connect to auth_username: username for authentication auth_private_key: path to private key for authentication auth_private_key_passphrase: passphrase for decrypting ssh key if necessary auth_password: password for authentication auth_strict_key: strict host checking or not auth_bypass: bypass "in channel" authentication -- only supported with telnet, asynctelnet, and system transport plugins timeout_socket: timeout for establishing socket/initial connection in seconds timeout_transport: timeout for ssh|telnet transport in seconds timeout_ops: timeout for ssh channel operations comms_return_char: character to use to send returns to host comms_ansi: True/False strip comms_ansi characters from output, generally the default value of False should be fine ssh_config_file: string to path for ssh config file, True to use default ssh config file or False to ignore default ssh config file ssh_known_hosts_file: string to path for ssh known hosts file, True to use default known file locations. Only applicable/needed if `auth_strict_key` is set to True on_init: callable that accepts the class instance as its only argument. 
this callable, if provided, is executed as the last step of object instantiation -- its purpose is primarily to provide a mechanism for scrapli community platforms to have an easy way to modify initialization arguments/object attributes without needing to create a class that extends the driver, instead allowing the community platforms to simply build from the GenericDriver or NetworkDriver classes, and pass this callable to do things such as appending to a username (looking at you RouterOS!!). Note that this is *always* a synchronous function (even for asyncio drivers)! on_open: callable that accepts the class instance as its only argument. this callable, if provided, is executed immediately after authentication is completed. Common use cases for this callable would be to disable paging or accept any kind of banner message that prompts a user upon connection on_close: callable that accepts the class instance as its only argument. this callable, if provided, is executed immediately prior to closing the underlying transport. Common use cases for this callable would be to save configurations prior to exiting, or to logout properly to free up vtys or similar transport: name of the transport plugin to use for the actual telnet/ssh/netconf connection. Available "core" transports are: - system - telnet - asynctelnet - ssh2 - paramiko - asyncssh Please see relevant transport plugin section for details. Additionally third party transport plugins may be available. transport_options: dictionary of options to pass to selected transport class; see docs for given transport class for details of what to pass here channel_lock: True/False to lock the channel (threading.Lock/asyncio.Lock) during any channel operations, defaults to False channel_log: True/False or a string path to a file of where to write out channel logs -- these are not "logs" in the normal logging module sense, but only the output that is read from the channel. 
In other words, the output of the channel log should look similar to what you would see as a human connecting to a device channel_log_mode: "write"|"append", all other values will raise ValueError, does what it sounds like it should by setting the channel log to the provided mode logging_uid: unique identifier (string) to associate to log messages; useful if you have multiple connections to the same device (i.e. one console, one ssh, or one to each supervisor module, etc.) failed_when_contains: list of strings indicating command/config failure textfsm_platform: string to use to fetch ntc-templates templates for textfsm parsing genie_platform: string to use to fetch genie parser templates privilege_levels: optional user provided privilege levels, if left None will default to scrapli standard privilege levels default_desired_privilege_level: string of name of default desired priv, this is the priv level that is generally used to disable paging/set terminal width and things like that upon first login, and is also the priv level scrapli will try to acquire for normal "command" operations (`send_command`, `send_commands`) auth_secondary: password to use for secondary authentication (enable) failed_when_contains: List of strings that indicate a command/config has failed variant: name of the community platform variant if desired **kwargs: should be unused, but here to accept any additional kwargs from users Returns: final_driver: synchronous driver class for provided driver Raises: ScrapliValueError: if provided transport is asyncio ScrapliTypeError: if `platform` not in keyword arguments """ logger.debug("Scrapli factory initialized") if transport in ASYNCIO_TRANSPORTS: raise ScrapliValueError("Use 'AsyncScrapli' if using an async transport!") if not isinstance(platform, str): raise ScrapliTypeError(f"Argument 'platform' must be 'str' got '{type(platform)}'") provided_kwargs = _build_provided_kwargs_dict( host=host, port=port, auth_username=auth_username, 
auth_password=auth_password, auth_private_key=auth_private_key, auth_private_key_passphrase=auth_private_key_passphrase, auth_strict_key=auth_strict_key, auth_bypass=auth_bypass, timeout_socket=timeout_socket, timeout_transport=timeout_transport, timeout_ops=timeout_ops, comms_return_char=comms_return_char, comms_ansi=comms_ansi, ssh_config_file=ssh_config_file, ssh_known_hosts_file=ssh_known_hosts_file, on_init=on_init, on_open=on_open, on_close=on_close, transport=transport, transport_options=transport_options, channel_log=channel_log, channel_log_mode=channel_log_mode, channel_lock=channel_lock, logging_uid=logging_uid, privilege_levels=privilege_levels, default_desired_privilege_level=default_desired_privilege_level, auth_secondary=auth_secondary, failed_when_contains=failed_when_contains, textfsm_platform=textfsm_platform, genie_platform=genie_platform, **kwargs, ) final_driver, additional_kwargs = cls._get_driver(platform=platform, variant=variant) # at this point will need to merge the additional kwargs in (for community drivers), # ensure that kwargs passed by user supersede the ones coming from community platform if additional_kwargs: final_kwargs = {**additional_kwargs, **provided_kwargs} else: final_kwargs = provided_kwargs final_conn = final_driver(**final_kwargs) # cast the final conn to type Scrapli to appease mypy -- we know it will be a NetworkDriver # or GenericDriver, but thats ok =) final_conn = cast(Scrapli, final_conn) return final_conn class AsyncScrapli(AsyncNetworkDriver): CORE_PLATFORM_MAP = { "arista_eos": AsyncEOSDriver, "cisco_iosxe": AsyncIOSXEDriver, "cisco_iosxr": AsyncIOSXRDriver, "cisco_nxos": AsyncNXOSDriver, "juniper_junos": AsyncJunosDriver, } DRIVER_MAP = {"network": AsyncNetworkDriver, "generic": AsyncGenericDriver} @classmethod def _get_driver_class( cls, platform_details: Dict[str, Any], variant: Optional[str] ) -> Union[Type[AsyncNetworkDriver], Type[AsyncGenericDriver]]: """ Fetch community driver class based on platform 
details Args: platform_details: dict of details about community platform from scrapli_community library variant: optional name of variant of community platform Returns: NetworkDriver: final driver class Raises: N/A """ final_driver: Union[ Type[AsyncNetworkDriver], Type[AsyncGenericDriver], ] if variant and platform_details["variants"][variant].get("driver_type"): variant_driver_data = platform_details["variants"][variant].pop("driver_type") final_driver = variant_driver_data["async"] return final_driver if isinstance(platform_details["driver_type"], str): driver_type = platform_details["driver_type"] standard_final_driver = cls.DRIVER_MAP.get(driver_type, None) if standard_final_driver: return standard_final_driver final_driver = platform_details["driver_type"]["async"] return final_driver @classmethod def _get_community_driver( cls, community_platform_name: str, variant: Optional[str] ) -> Tuple[Union[Type[AsyncNetworkDriver], Type[AsyncGenericDriver]], Dict[str, Any]]: """ Get community driver Args: community_platform_name: name of community variant: optional name of variant of community platform Returns: NetworkDriver: final driver class Raises: N/A """ platform_details = _get_community_platform_details( community_platform_name=community_platform_name ) final_driver = cls._get_driver_class(platform_details=platform_details, variant=variant) final_platform_kwargs = _get_driver_kwargs( platform_details=platform_details, variant=variant, _async=True ) return final_driver, final_platform_kwargs @classmethod def _get_driver( cls, platform: str, variant: Optional[str] ) -> Tuple[Union[Type[AsyncNetworkDriver], Type[AsyncGenericDriver]], Dict[str, Any]]: """ Parent get driver method for sync Scrapli Args: platform: name of target platform; i.e. `cisco_iosxe`, `arista_eos`, etc. 
variant: name of the target platform variant Returns: NetworkDriver: final driver class; generally NetworkDriver, but for some community platforms could be GenericDriver, also returns any additional kwargs comming from the community platform (if any) Raises: N/A """ additional_kwargs: Dict[str, Any] = {} final_driver: Union[Type[AsyncGenericDriver], Type[AsyncNetworkDriver]] if platform in cls.CORE_PLATFORM_MAP: final_driver = cls.CORE_PLATFORM_MAP[platform] msg = f"Driver '{final_driver}' selected from scrapli core drivers" else: final_driver, additional_kwargs = cls._get_community_driver( community_platform_name=platform, variant=variant ) msg = ( f"Driver '{final_driver}' selected from scrapli community platforms, with the " f"following platform arguments: '{additional_kwargs}'" ) logger.info(msg) return final_driver, additional_kwargs def __new__( # pylint: disable=R0914 cls, platform: str, host: str, privilege_levels: Optional[Dict[str, PrivilegeLevel]] = None, default_desired_privilege_level: Optional[str] = None, port: Optional[int] = None, auth_username: Optional[str] = None, auth_password: Optional[str] = None, auth_private_key: Optional[str] = None, auth_private_key_passphrase: Optional[str] = None, auth_strict_key: Optional[bool] = None, auth_bypass: Optional[bool] = None, timeout_socket: Optional[float] = None, timeout_transport: Optional[float] = None, timeout_ops: Optional[float] = None, comms_return_char: Optional[str] = None, comms_ansi: Optional[bool] = None, ssh_config_file: Optional[Union[str, bool]] = None, ssh_known_hosts_file: Optional[Union[str, bool]] = None, on_init: Optional[Callable[..., Any]] = None, on_open: Optional[Callable[..., Any]] = None, on_close: Optional[Callable[..., Any]] = None, transport: Optional[str] = None, transport_options: Optional[Dict[str, Any]] = None, channel_log: Optional[Union[str, bool, BytesIO]] = None, channel_log_mode: Optional[str] = None, channel_lock: Optional[bool] = None, logging_uid: Optional[str] = 
None, auth_secondary: Optional[str] = None, failed_when_contains: Optional[List[str]] = None, textfsm_platform: Optional[str] = None, genie_platform: Optional[str] = None, variant: Optional[str] = None, **kwargs: Dict[Any, Any], ) -> "AsyncScrapli": r""" Scrapli Factory method for asynchronous drivers Args: platform: name of the scrapli platform to return a connection object for; should be one of the "core" platforms or a valid community platform name host: host ip/name to connect to port: port to connect to auth_username: username for authentication auth_private_key: path to private key for authentication auth_private_key_passphrase: passphrase for decrypting ssh key if necessary auth_password: password for authentication auth_strict_key: strict host checking or not auth_bypass: bypass "in channel" authentication -- only supported with telnet, asynctelnet, and system transport plugins timeout_socket: timeout for establishing socket/initial connection in seconds timeout_transport: timeout for ssh|telnet transport in seconds timeout_ops: timeout for ssh channel operations comms_return_char: character to use to send returns to host comms_ansi: True/False strip comms_ansi characters from output, generally the default value of False should be fine ssh_config_file: string to path for ssh config file, True to use default ssh config file or False to ignore default ssh config file ssh_known_hosts_file: string to path for ssh known hosts file, True to use default known file locations. Only applicable/needed if `auth_strict_key` is set to True on_init: callable that accepts the class instance as its only argument. 
this callable, if provided, is executed as the last step of object instantiation -- its purpose is primarily to provide a mechanism for scrapli community platforms to have an easy way to modify initialization arguments/object attributes without needing to create a class that extends the driver, instead allowing the community platforms to simply build from the GenericDriver or NetworkDriver classes, and pass this callable to do things such as appending to a username (looking at you RouterOS!!). Note that this is *always* a synchronous function (even for asyncio drivers)! on_open: callable that accepts the class instance as its only argument. this callable, if provided, is executed immediately after authentication is completed. Common use cases for this callable would be to disable paging or accept any kind of banner message that prompts a user upon connection on_close: callable that accepts the class instance as its only argument. this callable, if provided, is executed immediately prior to closing the underlying transport. Common use cases for this callable would be to save configurations prior to exiting, or to logout properly to free up vtys or similar transport: name of the transport plugin to use for the actual telnet/ssh/netconf connection. Available "core" transports are: - system - telnet - asynctelnet - ssh2 - paramiko - asyncssh Please see relevant transport plugin section for details. Additionally third party transport plugins may be available. transport_options: dictionary of options to pass to selected transport class; see docs for given transport class for details of what to pass here channel_lock: True/False to lock the channel (threading.Lock/asyncio.Lock) during any channel operations, defaults to False channel_log: True/False or a string path to a file of where to write out channel logs -- these are not "logs" in the normal logging module sense, but only the output that is read from the channel. 
In other words, the output of the channel log should look similar to what you would see as a human connecting to a device channel_log_mode: "write"|"append", all other values will raise ValueError, does what it sounds like it should by setting the channel log to the provided mode logging_uid: unique identifier (string) to associate to log messages; useful if you have multiple connections to the same device (i.e. one console, one ssh, or one to each supervisor module, etc.) failed_when_contains: list of strings indicating command/config failure textfsm_platform: string to use to fetch ntc-templates templates for textfsm parsing genie_platform: string to use to fetch genie parser templates privilege_levels: optional user provided privilege levels, if left None will default to scrapli standard privilege levels default_desired_privilege_level: string of name of default desired priv, this is the priv level that is generally used to disable paging/set terminal width and things like that upon first login, and is also the priv level scrapli will try to acquire for normal "command" operations (`send_command`, `send_commands`) auth_secondary: password to use for secondary authentication (enable) failed_when_contains: List of strings that indicate a command/config has failed variant: name of the community platform variant if desired **kwargs: should be unused, but here to accept any additional kwargs from users Returns: final_driver: asynchronous driver class for provided driver Raises: ScrapliValueError: if provided transport is asyncio ScrapliTypeError: if `platform` not in keyword arguments """ logger.debug("AsyncScrapli factory initialized") if transport not in ASYNCIO_TRANSPORTS: raise ScrapliValueError("Use 'Scrapli' if using a synchronous transport!") if not isinstance(platform, str): raise ScrapliTypeError(f"Argument 'platform' must be 'str' got '{type(platform)}'") provided_kwargs = _build_provided_kwargs_dict( host=host, port=port, auth_username=auth_username, 
auth_password=auth_password, auth_private_key=auth_private_key, auth_private_key_passphrase=auth_private_key_passphrase, auth_strict_key=auth_strict_key, auth_bypass=auth_bypass, timeout_socket=timeout_socket, timeout_transport=timeout_transport, timeout_ops=timeout_ops, comms_return_char=comms_return_char, comms_ansi=comms_ansi, ssh_config_file=ssh_config_file, ssh_known_hosts_file=ssh_known_hosts_file, on_init=on_init, on_open=on_open, on_close=on_close, transport=transport, transport_options=transport_options, channel_log=channel_log, channel_log_mode=channel_log_mode, channel_lock=channel_lock, logging_uid=logging_uid, privilege_levels=privilege_levels, default_desired_privilege_level=default_desired_privilege_level, auth_secondary=auth_secondary, failed_when_contains=failed_when_contains, textfsm_platform=textfsm_platform, genie_platform=genie_platform, **kwargs, ) final_driver, additional_kwargs = cls._get_driver(platform=platform, variant=variant) # at this point will need to merge the additional kwargs in (for community drivers), # ensure that kwargs passed by user supersede the ones coming from community platform if additional_kwargs: final_kwargs = {**additional_kwargs, **provided_kwargs} else: final_kwargs = provided_kwargs final_conn = final_driver(**final_kwargs) # cast the final conn to type Scrapli to appease mypy -- we know it will be a NetworkDriver # or GenericDriver, but thats ok =) final_conn = cast(AsyncScrapli, final_conn) return final_conn
"""scrapli.factory""" import importlib from copy import deepcopy from io import BytesIO from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union, cast from scrapli.driver import AsyncGenericDriver, AsyncNetworkDriver, GenericDriver, NetworkDriver from scrapli.driver.core import ( AsyncEOSDriver, AsyncIOSXEDriver, AsyncIOSXRDriver, AsyncJunosDriver, AsyncNXOSDriver, EOSDriver, IOSXEDriver, IOSXRDriver, JunosDriver, NXOSDriver, ) from scrapli.driver.network.base_driver import PrivilegeLevel from scrapli.exceptions import ( ScrapliException, ScrapliModuleNotFound, ScrapliTypeError, ScrapliValueError, ) from scrapli.helper import format_user_warning from scrapli.logging import logger from scrapli.transport import ASYNCIO_TRANSPORTS def _build_provided_kwargs_dict( # pylint: disable=R0914 host: str, privilege_levels: Optional[Dict[str, PrivilegeLevel]], default_desired_privilege_level: Optional[str], port: Optional[int], auth_username: Optional[str], auth_password: Optional[str], auth_private_key: Optional[str], auth_private_key_passphrase: Optional[str], auth_strict_key: Optional[bool], auth_bypass: Optional[bool], timeout_socket: Optional[float], timeout_transport: Optional[float], timeout_ops: Optional[float], comms_return_char: Optional[str], comms_ansi: Optional[bool], ssh_config_file: Optional[Union[str, bool]], ssh_known_hosts_file: Optional[Union[str, bool]], on_init: Optional[Callable[..., Any]], on_open: Optional[Callable[..., Any]], on_close: Optional[Callable[..., Any]], transport: Optional[str], transport_options: Optional[Dict[str, Any]], channel_log: Optional[Union[str, bool, BytesIO]], channel_log_mode: Optional[str], channel_lock: Optional[bool], logging_uid: Optional[str], auth_secondary: Optional[str], failed_when_contains: Optional[List[str]], textfsm_platform: Optional[str], genie_platform: Optional[str], **kwargs: Dict[Any, Any], ) -> Dict[str, Any]: r""" Build arguments dict based on provided inputs This function builds the dict 
of keyword args to unpack and send to the driver -- in the factory context this also needs to convert the arguments that have defaults that evaluate to False (i.e ssh_config_file which defaults to False) from None which is their default in the factory, back to their normal default if they are still None -OR- to whatever the user provided. # noqa: DAR101 Args: N/A Returns: dict: dictionary with user args merged with the appropriate default options Raises: N/A """ # dict of all args coming from the factories _provided_args: Dict[str, Any] = { "host": host, "privilege_levels": privilege_levels, "default_desired_privilege_level": default_desired_privilege_level, "port": port, "auth_username": auth_username, "auth_password": auth_password, "auth_private_key": auth_private_key, "auth_private_key_passphrase": auth_private_key_passphrase, "auth_strict_key": auth_strict_key, "auth_bypass": auth_bypass, "timeout_socket": timeout_socket, "timeout_transport": timeout_transport, "timeout_ops": timeout_ops, "comms_return_char": comms_return_char, "comms_ansi": comms_ansi, "ssh_config_file": ssh_config_file, "ssh_known_hosts_file": ssh_known_hosts_file, "on_init": on_init, "on_open": on_open, "on_close": on_close, "transport": transport, "transport_options": transport_options, "channel_log": channel_log, "channel_log_mode": channel_log_mode, "channel_lock": channel_lock, "logging_uid": logging_uid, "auth_secondary": auth_secondary, "failed_when_contains": failed_when_contains, "textfsm_platform": textfsm_platform, "genie_platform": genie_platform, } # add back in the None/False args _provided_args = {key: value for key, value in _provided_args.items() if value is not None} # merge in any kwargs that maybe need to get passed down all_provided_args = {**_provided_args, **kwargs} return all_provided_args def _get_community_platform_details(community_platform_name: str) -> Dict[str, Any]: """ Fetch community platform details Args: community_platform_name: name of community Returns: 
platform_details: dict of details about community platform from scrapli_community library Raises: ScrapliModuleNotFound: if scrapli_community is not importable ScrapliModuleNotFound: if provided community_platform_name package is not importable ScrapliException: if community platform is missing "SCRAPLI_PLATFORM" attribute """ try: importlib.import_module(name="scrapli_community") except ModuleNotFoundError as exc: title = "Module not found!" message = ( "Scrapli Community package is not installed!\n" "To resolve this issue, install the transport plugin. You can do this in one of " "the following ways:\n" "1: 'pip install -r requirements-community.txt'\n" "2: 'pip install scrapli[community]'" ) warning = format_user_warning(title=title, message=message) raise ScrapliModuleNotFound(warning) from exc try: # replace any underscores in platform name with "."; should support any future platforms # that dont have "child" os types -- i.e. just "cisco" instead of "cisco_iosxe" scrapli_community_platform = importlib.import_module( name=f"scrapli_community.{community_platform_name.replace('_', '.')}" ) except ModuleNotFoundError as exc: title = "Module not found!" message = ( f"Scrapli Community platform '{community_platform_name}` not found!\n" "To resolve this issue, ensure you have the correct platform name, and that a scrapli " " community platform of that name exists!" 
) warning = format_user_warning(title=title, message=message) raise ScrapliModuleNotFound(warning) from exc platform_details_original = getattr(scrapli_community_platform, "SCRAPLI_PLATFORM", {}) if not platform_details_original: msg = "Community platform missing required attribute `SCRAPLI_PLATFORM`" raise ScrapliException(msg) platform_details: Dict[str, Any] = deepcopy(platform_details_original) return platform_details def _get_driver_kwargs( platform_details: Dict[str, Any], variant: Optional[str], _async: bool = False ) -> Dict[str, Any]: """ Parent get driver method Args: platform_details: dict of details about community platform from scrapli_community library variant: optional name of variant of community platform _async: True/False this is for an asyncio transport driver Returns: final_platform_kwargs: dict of final driver kwargs Raises: N/A """ platform_kwargs = platform_details["defaults"] if variant: variant_kwargs = platform_details["variants"][variant] final_platform_kwargs = {**platform_kwargs, **variant_kwargs} else: final_platform_kwargs = platform_kwargs if not _async: # remove unnecessary asyncio things final_platform_kwargs.pop("async_on_open") final_platform_kwargs.pop("async_on_close") # rename sync_on_(open|close) keys to just "on_open"/"on_close" final_platform_kwargs["on_open"] = final_platform_kwargs.pop("sync_on_open") final_platform_kwargs["on_close"] = final_platform_kwargs.pop("sync_on_close") else: # remove unnecessary sync things final_platform_kwargs.pop("sync_on_open") final_platform_kwargs.pop("sync_on_close") # rename sync_on_(open|close) keys to just "on_open"/"on_close" final_platform_kwargs["on_open"] = final_platform_kwargs.pop("async_on_open") final_platform_kwargs["on_close"] = final_platform_kwargs.pop("async_on_close") return final_platform_kwargs class Scrapli(NetworkDriver): CORE_PLATFORM_MAP = { "arista_eos": EOSDriver, "cisco_iosxe": IOSXEDriver, "cisco_iosxr": IOSXRDriver, "cisco_nxos": NXOSDriver, "juniper_junos": 
JunosDriver, } DRIVER_MAP = {"network": NetworkDriver, "generic": GenericDriver} @classmethod def _get_driver_class( cls, platform_details: Dict[str, Any], variant: Optional[str] ) -> Union[Type[NetworkDriver], Type[GenericDriver]]: """ Fetch community driver class based on platform details Args: platform_details: dict of details about community platform from scrapli_community library variant: optional name of variant of community platform Returns: NetworkDriver: final driver class Raises: N/A """ final_driver: Union[ Type[NetworkDriver], Type[GenericDriver], ] if variant and platform_details["variants"][variant].get("driver_type"): variant_driver_data = platform_details["variants"][variant].pop("driver_type") final_driver = variant_driver_data["sync"] return final_driver if isinstance(platform_details["driver_type"], str): driver_type = platform_details["driver_type"] standard_final_driver = cls.DRIVER_MAP.get(driver_type, None) if standard_final_driver: return standard_final_driver final_driver = platform_details["driver_type"]["sync"] return final_driver @classmethod def _get_community_driver( cls, community_platform_name: str, variant: Optional[str] ) -> Tuple[Union[Type[NetworkDriver], Type[GenericDriver]], Dict[str, Any]]: """ Get community driver Args: community_platform_name: name of community variant: optional name of variant of community platform Returns: NetworkDriver: final driver class Raises: N/A """ platform_details = _get_community_platform_details( community_platform_name=community_platform_name ) final_driver = cls._get_driver_class(platform_details=platform_details, variant=variant) final_platform_kwargs = _get_driver_kwargs( platform_details=platform_details, variant=variant, _async=False ) return final_driver, final_platform_kwargs @classmethod def _get_driver( cls, platform: str, variant: Optional[str] ) -> Tuple[Union[Type[NetworkDriver], Type[GenericDriver]], Dict[str, Any]]: """ Parent get driver method for sync Scrapli Args: platform: name 
of target platform; i.e. `cisco_iosxe`, `arista_eos`, etc. variant: name of the target platform variant Returns: NetworkDriver: final driver class; generally NetworkDriver, but for some community platforms could be GenericDriver, also returns any additional kwargs comming from the community platform (if any) Raises: N/A """ additional_kwargs: Dict[str, Any] = {} final_driver: Union[Type[GenericDriver], Type[NetworkDriver]] if platform in cls.CORE_PLATFORM_MAP: final_driver = cls.CORE_PLATFORM_MAP[platform] msg = f"Driver '{final_driver}' selected from scrapli core drivers" else: final_driver, additional_kwargs = cls._get_community_driver( community_platform_name=platform, variant=variant ) msg = ( f"Driver '{final_driver}' selected from scrapli community platforms, with the " f"following platform arguments: '{additional_kwargs}'" ) logger.info(msg) return final_driver, additional_kwargs def __new__( # pylint: disable=R0914 cls, platform: str, host: str, privilege_levels: Optional[Dict[str, PrivilegeLevel]] = None, default_desired_privilege_level: Optional[str] = None, port: Optional[int] = None, auth_username: Optional[str] = None, auth_password: Optional[str] = None, auth_private_key: Optional[str] = None, auth_private_key_passphrase: Optional[str] = None, auth_strict_key: Optional[bool] = None, auth_bypass: Optional[bool] = None, timeout_socket: Optional[float] = None, timeout_transport: Optional[float] = None, timeout_ops: Optional[float] = None, comms_return_char: Optional[str] = None, comms_ansi: Optional[bool] = None, ssh_config_file: Optional[Union[str, bool]] = None, ssh_known_hosts_file: Optional[Union[str, bool]] = None, on_init: Optional[Callable[..., Any]] = None, on_open: Optional[Callable[..., Any]] = None, on_close: Optional[Callable[..., Any]] = None, transport: Optional[str] = None, transport_options: Optional[Dict[str, Any]] = None, channel_log: Optional[Union[str, bool, BytesIO]] = None, channel_lock: Optional[bool] = None, channel_log_mode: 
Optional[str] = None, logging_uid: Optional[str] = None, auth_secondary: Optional[str] = None, failed_when_contains: Optional[List[str]] = None, textfsm_platform: Optional[str] = None, genie_platform: Optional[str] = None, variant: Optional[str] = None, **kwargs: Dict[Any, Any], ) -> "Scrapli": r""" Scrapli Factory method for synchronous drivers Args: platform: name of the scrapli platform to return a connection object for; should be one of the "core" platforms or a valid community platform name host: host ip/name to connect to port: port to connect to auth_username: username for authentication auth_private_key: path to private key for authentication auth_private_key_passphrase: passphrase for decrypting ssh key if necessary auth_password: password for authentication auth_strict_key: strict host checking or not auth_bypass: bypass "in channel" authentication -- only supported with telnet, asynctelnet, and system transport plugins timeout_socket: timeout for establishing socket/initial connection in seconds timeout_transport: timeout for ssh|telnet transport in seconds timeout_ops: timeout for ssh channel operations comms_return_char: character to use to send returns to host comms_ansi: True/False strip comms_ansi characters from output, generally the default value of False should be fine ssh_config_file: string to path for ssh config file, True to use default ssh config file or False to ignore default ssh config file ssh_known_hosts_file: string to path for ssh known hosts file, True to use default known file locations. Only applicable/needed if `auth_strict_key` is set to True on_init: callable that accepts the class instance as its only argument. 
this callable, if provided, is executed as the last step of object instantiation -- its purpose is primarily to provide a mechanism for scrapli community platforms to have an easy way to modify initialization arguments/object attributes without needing to create a class that extends the driver, instead allowing the community platforms to simply build from the GenericDriver or NetworkDriver classes, and pass this callable to do things such as appending to a username (looking at you RouterOS!!). Note that this is *always* a synchronous function (even for asyncio drivers)! on_open: callable that accepts the class instance as its only argument. this callable, if provided, is executed immediately after authentication is completed. Common use cases for this callable would be to disable paging or accept any kind of banner message that prompts a user upon connection on_close: callable that accepts the class instance as its only argument. this callable, if provided, is executed immediately prior to closing the underlying transport. Common use cases for this callable would be to save configurations prior to exiting, or to logout properly to free up vtys or similar transport: name of the transport plugin to use for the actual telnet/ssh/netconf connection. Available "core" transports are: - system - telnet - asynctelnet - ssh2 - paramiko - asyncssh Please see relevant transport plugin section for details. Additionally third party transport plugins may be available. transport_options: dictionary of options to pass to selected transport class; see docs for given transport class for details of what to pass here channel_lock: True/False to lock the channel (threading.Lock/asyncio.Lock) during any channel operations, defaults to False channel_log: True/False or a string path to a file of where to write out channel logs -- these are not "logs" in the normal logging module sense, but only the output that is read from the channel. 
In other words, the output of the channel log should look similar to what you would see as a human connecting to a device channel_log_mode: "write"|"append", all other values will raise ValueError, does what it sounds like it should by setting the channel log to the provided mode logging_uid: unique identifier (string) to associate to log messages; useful if you have multiple connections to the same device (i.e. one console, one ssh, or one to each supervisor module, etc.) failed_when_contains: list of strings indicating command/config failure textfsm_platform: string to use to fetch ntc-templates templates for textfsm parsing genie_platform: string to use to fetch genie parser templates privilege_levels: optional user provided privilege levels, if left None will default to scrapli standard privilege levels default_desired_privilege_level: string of name of default desired priv, this is the priv level that is generally used to disable paging/set terminal width and things like that upon first login, and is also the priv level scrapli will try to acquire for normal "command" operations (`send_command`, `send_commands`) auth_secondary: password to use for secondary authentication (enable) failed_when_contains: List of strings that indicate a command/config has failed variant: name of the community platform variant if desired **kwargs: should be unused, but here to accept any additional kwargs from users Returns: final_driver: synchronous driver class for provided driver Raises: ScrapliValueError: if provided transport is asyncio ScrapliTypeError: if `platform` not in keyword arguments """ logger.debug("Scrapli factory initialized") if transport in ASYNCIO_TRANSPORTS: raise ScrapliValueError("Use 'AsyncScrapli' if using an async transport!") if not isinstance(platform, str): raise ScrapliTypeError(f"Argument 'platform' must be 'str' got '{type(platform)}'") provided_kwargs = _build_provided_kwargs_dict( host=host, port=port, auth_username=auth_username, 
auth_password=auth_password, auth_private_key=auth_private_key, auth_private_key_passphrase=auth_private_key_passphrase, auth_strict_key=auth_strict_key, auth_bypass=auth_bypass, timeout_socket=timeout_socket, timeout_transport=timeout_transport, timeout_ops=timeout_ops, comms_return_char=comms_return_char, comms_ansi=comms_ansi, ssh_config_file=ssh_config_file, ssh_known_hosts_file=ssh_known_hosts_file, on_init=on_init, on_open=on_open, on_close=on_close, transport=transport, transport_options=transport_options, channel_log=channel_log, channel_log_mode=channel_log_mode, channel_lock=channel_lock, logging_uid=logging_uid, privilege_levels=privilege_levels, default_desired_privilege_level=default_desired_privilege_level, auth_secondary=auth_secondary, failed_when_contains=failed_when_contains, textfsm_platform=textfsm_platform, genie_platform=genie_platform, **kwargs, ) final_driver, additional_kwargs = cls._get_driver(platform=platform, variant=variant) # at this point will need to merge the additional kwargs in (for community drivers), # ensure that kwargs passed by user supersede the ones coming from community platform if additional_kwargs: final_kwargs = {**additional_kwargs, **provided_kwargs} else: final_kwargs = provided_kwargs final_conn = final_driver(**final_kwargs) # cast the final conn to type Scrapli to appease mypy -- we know it will be a NetworkDriver # or GenericDriver, but thats ok =) final_conn = cast(Scrapli, final_conn) return final_conn class AsyncScrapli(AsyncNetworkDriver): CORE_PLATFORM_MAP = { "arista_eos": AsyncEOSDriver, "cisco_iosxe": AsyncIOSXEDriver, "cisco_iosxr": AsyncIOSXRDriver, "cisco_nxos": AsyncNXOSDriver, "juniper_junos": AsyncJunosDriver, } DRIVER_MAP = {"network": AsyncNetworkDriver, "generic": AsyncGenericDriver} @classmethod def _get_driver_class( cls, platform_details: Dict[str, Any], variant: Optional[str] ) -> Union[Type[AsyncNetworkDriver], Type[AsyncGenericDriver]]: """ Fetch community driver class based on platform 
details Args: platform_details: dict of details about community platform from scrapli_community library variant: optional name of variant of community platform Returns: NetworkDriver: final driver class Raises: N/A """ final_driver: Union[ Type[AsyncNetworkDriver], Type[AsyncGenericDriver], ] if variant and platform_details["variants"][variant].get("driver_type"): variant_driver_data = platform_details["variants"][variant].pop("driver_type") final_driver = variant_driver_data["async"] return final_driver if isinstance(platform_details["driver_type"], str): driver_type = platform_details["driver_type"] standard_final_driver = cls.DRIVER_MAP.get(driver_type, None) if standard_final_driver: return standard_final_driver final_driver = platform_details["driver_type"]["async"] return final_driver @classmethod def _get_community_driver( cls, community_platform_name: str, variant: Optional[str] ) -> Tuple[Union[Type[AsyncNetworkDriver], Type[AsyncGenericDriver]], Dict[str, Any]]: """ Get community driver Args: community_platform_name: name of community variant: optional name of variant of community platform Returns: NetworkDriver: final driver class Raises: N/A """ platform_details = _get_community_platform_details( community_platform_name=community_platform_name ) final_driver = cls._get_driver_class(platform_details=platform_details, variant=variant) final_platform_kwargs = _get_driver_kwargs( platform_details=platform_details, variant=variant, _async=True ) return final_driver, final_platform_kwargs @classmethod def _get_driver( cls, platform: str, variant: Optional[str] ) -> Tuple[Union[Type[AsyncNetworkDriver], Type[AsyncGenericDriver]], Dict[str, Any]]: """ Parent get driver method for sync Scrapli Args: platform: name of target platform; i.e. `cisco_iosxe`, `arista_eos`, etc. 
variant: name of the target platform variant Returns: NetworkDriver: final driver class; generally NetworkDriver, but for some community platforms could be GenericDriver, also returns any additional kwargs comming from the community platform (if any) Raises: N/A """ additional_kwargs: Dict[str, Any] = {} final_driver: Union[Type[AsyncGenericDriver], Type[AsyncNetworkDriver]] if platform in cls.CORE_PLATFORM_MAP: final_driver = cls.CORE_PLATFORM_MAP[platform] msg = f"Driver '{final_driver}' selected from scrapli core drivers" else: final_driver, additional_kwargs = cls._get_community_driver( community_platform_name=platform, variant=variant ) msg = ( f"Driver '{final_driver}' selected from scrapli community platforms, with the " f"following platform arguments: '{additional_kwargs}'" ) logger.info(msg) return final_driver, additional_kwargs def __new__( # pylint: disable=R0914 cls, platform: str, host: str, privilege_levels: Optional[Dict[str, PrivilegeLevel]] = None, default_desired_privilege_level: Optional[str] = None, port: Optional[int] = None, auth_username: Optional[str] = None, auth_password: Optional[str] = None, auth_private_key: Optional[str] = None, auth_private_key_passphrase: Optional[str] = None, auth_strict_key: Optional[bool] = None, auth_bypass: Optional[bool] = None, timeout_socket: Optional[float] = None, timeout_transport: Optional[float] = None, timeout_ops: Optional[float] = None, comms_return_char: Optional[str] = None, comms_ansi: Optional[bool] = None, ssh_config_file: Optional[Union[str, bool]] = None, ssh_known_hosts_file: Optional[Union[str, bool]] = None, on_init: Optional[Callable[..., Any]] = None, on_open: Optional[Callable[..., Any]] = None, on_close: Optional[Callable[..., Any]] = None, transport: Optional[str] = None, transport_options: Optional[Dict[str, Any]] = None, channel_log: Optional[Union[str, bool, BytesIO]] = None, channel_log_mode: Optional[str] = None, channel_lock: Optional[bool] = None, logging_uid: Optional[str] = 
None, auth_secondary: Optional[str] = None, failed_when_contains: Optional[List[str]] = None, textfsm_platform: Optional[str] = None, genie_platform: Optional[str] = None, variant: Optional[str] = None, **kwargs: Dict[Any, Any], ) -> "AsyncScrapli": r""" Scrapli Factory method for asynchronous drivers Args: platform: name of the scrapli platform to return a connection object for; should be one of the "core" platforms or a valid community platform name host: host ip/name to connect to port: port to connect to auth_username: username for authentication auth_private_key: path to private key for authentication auth_private_key_passphrase: passphrase for decrypting ssh key if necessary auth_password: password for authentication auth_strict_key: strict host checking or not auth_bypass: bypass "in channel" authentication -- only supported with telnet, asynctelnet, and system transport plugins timeout_socket: timeout for establishing socket/initial connection in seconds timeout_transport: timeout for ssh|telnet transport in seconds timeout_ops: timeout for ssh channel operations comms_return_char: character to use to send returns to host comms_ansi: True/False strip comms_ansi characters from output, generally the default value of False should be fine ssh_config_file: string to path for ssh config file, True to use default ssh config file or False to ignore default ssh config file ssh_known_hosts_file: string to path for ssh known hosts file, True to use default known file locations. Only applicable/needed if `auth_strict_key` is set to True on_init: callable that accepts the class instance as its only argument. 
this callable, if provided, is executed as the last step of object instantiation -- its purpose is primarily to provide a mechanism for scrapli community platforms to have an easy way to modify initialization arguments/object attributes without needing to create a class that extends the driver, instead allowing the community platforms to simply build from the GenericDriver or NetworkDriver classes, and pass this callable to do things such as appending to a username (looking at you RouterOS!!). Note that this is *always* a synchronous function (even for asyncio drivers)! on_open: callable that accepts the class instance as its only argument. this callable, if provided, is executed immediately after authentication is completed. Common use cases for this callable would be to disable paging or accept any kind of banner message that prompts a user upon connection on_close: callable that accepts the class instance as its only argument. this callable, if provided, is executed immediately prior to closing the underlying transport. Common use cases for this callable would be to save configurations prior to exiting, or to logout properly to free up vtys or similar transport: name of the transport plugin to use for the actual telnet/ssh/netconf connection. Available "core" transports are: - system - telnet - asynctelnet - ssh2 - paramiko - asyncssh Please see relevant transport plugin section for details. Additionally third party transport plugins may be available. transport_options: dictionary of options to pass to selected transport class; see docs for given transport class for details of what to pass here channel_lock: True/False to lock the channel (threading.Lock/asyncio.Lock) during any channel operations, defaults to False channel_log: True/False or a string path to a file of where to write out channel logs -- these are not "logs" in the normal logging module sense, but only the output that is read from the channel. 
In other words, the output of the channel log should look similar to what you would see as a human connecting to a device channel_log_mode: "write"|"append", all other values will raise ValueError, does what it sounds like it should by setting the channel log to the provided mode logging_uid: unique identifier (string) to associate to log messages; useful if you have multiple connections to the same device (i.e. one console, one ssh, or one to each supervisor module, etc.) failed_when_contains: list of strings indicating command/config failure textfsm_platform: string to use to fetch ntc-templates templates for textfsm parsing genie_platform: string to use to fetch genie parser templates privilege_levels: optional user provided privilege levels, if left None will default to scrapli standard privilege levels default_desired_privilege_level: string of name of default desired priv, this is the priv level that is generally used to disable paging/set terminal width and things like that upon first login, and is also the priv level scrapli will try to acquire for normal "command" operations (`send_command`, `send_commands`) auth_secondary: password to use for secondary authentication (enable) failed_when_contains: List of strings that indicate a command/config has failed variant: name of the community platform variant if desired **kwargs: should be unused, but here to accept any additional kwargs from users Returns: final_driver: asynchronous driver class for provided driver Raises: ScrapliValueError: if provided transport is asyncio ScrapliTypeError: if `platform` not in keyword arguments """ logger.debug("AsyncScrapli factory initialized") if transport not in ASYNCIO_TRANSPORTS: raise ScrapliValueError("Use 'Scrapli' if using a synchronous transport!") if not isinstance(platform, str): raise ScrapliTypeError(f"Argument 'platform' must be 'str' got '{type(platform)}'") provided_kwargs = _build_provided_kwargs_dict( host=host, port=port, auth_username=auth_username, 
auth_password=auth_password, auth_private_key=auth_private_key, auth_private_key_passphrase=auth_private_key_passphrase, auth_strict_key=auth_strict_key, auth_bypass=auth_bypass, timeout_socket=timeout_socket, timeout_transport=timeout_transport, timeout_ops=timeout_ops, comms_return_char=comms_return_char, comms_ansi=comms_ansi, ssh_config_file=ssh_config_file, ssh_known_hosts_file=ssh_known_hosts_file, on_init=on_init, on_open=on_open, on_close=on_close, transport=transport, transport_options=transport_options, channel_log=channel_log, channel_log_mode=channel_log_mode, channel_lock=channel_lock, logging_uid=logging_uid, privilege_levels=privilege_levels, default_desired_privilege_level=default_desired_privilege_level, auth_secondary=auth_secondary, failed_when_contains=failed_when_contains, textfsm_platform=textfsm_platform, genie_platform=genie_platform, **kwargs, ) final_driver, additional_kwargs = cls._get_driver(platform=platform, variant=variant) # at this point will need to merge the additional kwargs in (for community drivers), # ensure that kwargs passed by user supersede the ones coming from community platform if additional_kwargs: final_kwargs = {**additional_kwargs, **provided_kwargs} else: final_kwargs = provided_kwargs final_conn = final_driver(**final_kwargs) # cast the final conn to type Scrapli to appease mypy -- we know it will be a NetworkDriver # or GenericDriver, but thats ok =) final_conn = cast(AsyncScrapli, final_conn) return final_conn
from contextlib import contextmanager
from logging import getLogger, Logger
from typing import Iterator, Tuple, ClassVar

from sqlalchemy import create_engine
from sqlalchemy.orm import Session as sqlaSession, sessionmaker
from sqlalchemy.pool import StaticPool

from .base import Base


class Database:
    """
    A simplified connection interface for an SQLAlchemy engine.
    """

    __slots__ = ["engine", "_Session"]

    _Session: sessionmaker
    logger: ClassVar[Logger] = getLogger(__name__)

    def __init__(
        self,
        /,
        conn_args: dict = None,
        *,
        dialect: str = None,
        host: str = None,
        username: str = None,
        password: str = None,
        port: int = None,
        database_name: str = None,
        extra_engine_args: dict = None,
    ) -> None:
        """
        Build the engine and session factory, either from a ready-made
        ``conn_args`` mapping or from the individual keyword arguments.

        :param conn_args: full connection configuration mapping; takes
            precedence over the individual keyword arguments
        :param dialect: SQLAlchemy dialect name (e.g. ``sqlite``, ``postgresql``)
        :param host: database host; empty with ``sqlite`` selects ``:memory:``
        :param username: login user for server-based databases
        :param password: login password for server-based databases
        :param port: server port
        :param database_name: name of the database to connect to
        :param extra_engine_args: extra kwargs forwarded to ``create_engine``
        """
        kwargs = locals()
        # BUG FIX: 'self' must not leak into the config mapping -- it is
        # always truthy, which made the assert below unreachable and put a
        # bogus 'self' key into the configuration.
        kwargs.pop("self")
        kwargs.pop("conn_args")
        kwargs = {k: v for k, v in kwargs.items() if v}
        assert kwargs or conn_args, "Specify either conn_args or one of the kwargs"
        # Copy so a caller-owned conn_args mapping is never mutated.
        config = dict(conn_args) if conn_args else kwargs
        # BUG FIX: extra_engine_args was accepted but silently discarded;
        # merge it into the auto-specced engine arguments (caller wins).
        extra_engine_args = config.pop("extra_engine_args", None) or {}
        uri, engine_args = self._get_connection_conf(config)
        engine_args.update(extra_engine_args)
        self.engine = create_engine(uri, **engine_args)
        self._Session = sessionmaker(
            autocommit=False,
            autoflush=False,
            expire_on_commit=False,
            bind=self.engine,
        )

    # noinspection PyMethodMayBeStatic
    def _get_connection_conf(self, config) -> Tuple[str, dict]:
        """
        Automatically performs optimal parametrization of the SQLAlchemy engine.\n
        If this functionality is not desired, a manual override of this function
        can be performed using the 'override_engine' function.

        :return: Tuple containing the SQLAlchemy database uri and engine parameters
        """
        # Make sure that at least the dialect option exists
        assert config.get("dialect", False), "Database dialect selection is mandatory"
        # If host is empty and the dialect is 'sqlite', we know the user wants :memory:
        dialect, host = config.get("dialect"), config.get("host", None)
        engine_args: dict = {"pool_pre_ping": True}
        if dialect == "sqlite":
            engine_args.update({"connect_args": {"check_same_thread": False}})
            if not host:
                engine_args.update({"poolclass": StaticPool})
            # BUG FIX: the inner literal must use single quotes -- nesting
            # double quotes inside a double-quoted f-string is a SyntaxError
            # on every Python before 3.12 (PEP 701).
            uri: str = f"{dialect}:///{host if host else ':memory:'}"
        # This is the option for all other database types, such as Postgres or MySQL
        else:
            username = config.get("username")
            password = config.get("password")
            port = config.get("port")
            # BUG FIX: the keyword-argument path stores the name under
            # 'database_name', but only 'database' was read, so the URI always
            # ended in '/None'.  Accept both spellings.
            database = config.get("database") or config.get("database_name")
            if not username and not port:
                raise ValueError("Credentials and / or port missing")
            # BUG FIX: server-based SQLAlchemy URLs use '://', not ':///'
            # (the extra slash corrupted the authority part of the URL).
            uri: str = f"{dialect}://{username}:{password}@{host}:{port}/{database}"
        return uri, engine_args

    def override_engine(self, uri, **kwargs):
        """
        Provides an option to manually override the auto-specced engine.\n
        :param uri: Database URI
        :param kwargs: Normal kwargs as provided to the create_engine factory
        """
        self.engine = create_engine(uri, **kwargs)

    @contextmanager
    def Session(self) -> Iterator[sqlaSession]:
        """ Provides access to a scoped ORM Session """
        session: sqlaSession = self._Session()  # noqa
        try:
            yield session
            session.commit()
        except Exception:
            # In case there is an error during execution we rollback,
            # so no corrupted data reaches the database or fills the cache
            session.rollback()
            # Bare re-raise preserves the original traceback unchanged.
            raise
        finally:
            # Always close the session after operations
            session.close()

    def create(self, base_override=None) -> None:
        """ Creates all tables in the current Base """
        base = base_override or Base
        self.logger.info(f"Constructing Database: {str(self.engine.url)}")
        for table in base.metadata.sorted_tables:
            self.logger.info(f"    {table.name}")
        base.metadata.create_all(bind=self.engine)

    def destroy(self, base_override=None) -> None:
        """ Destroys all tables in the current Base """
        base = base_override or Base
        base.metadata.drop_all(bind=self.engine)


__all__ = ["Database"]
from contextlib import contextmanager
from logging import getLogger, Logger
from typing import Iterator, Tuple, ClassVar

from sqlalchemy import create_engine
from sqlalchemy.orm import Session as sqlaSession, sessionmaker
from sqlalchemy.pool import StaticPool

from .base import Base


class Database:
    """
    A simplified connection interface for an SQLAlchemy engine.
    """

    __slots__ = ["engine", "_Session"]

    _Session: sessionmaker
    logger: ClassVar[Logger] = getLogger(__name__)

    def __init__(
        self,
        /,
        conn_args: dict = None,
        *,
        dialect: str = None,
        host: str = None,
        username: str = None,
        password: str = None,
        port: int = None,
        database_name: str = None,
        extra_engine_args: dict = None,
    ) -> None:
        """
        Build the engine and session factory, either from a ready-made
        ``conn_args`` mapping or from the individual keyword arguments.

        :param conn_args: full connection configuration mapping; takes
            precedence over the individual keyword arguments
        :param dialect: SQLAlchemy dialect name (e.g. ``sqlite``, ``postgresql``)
        :param host: database host; empty with ``sqlite`` selects ``:memory:``
        :param username: login user for server-based databases
        :param password: login password for server-based databases
        :param port: server port
        :param database_name: name of the database to connect to
        :param extra_engine_args: extra kwargs forwarded to ``create_engine``
        """
        kwargs = locals()
        # BUG FIX: 'self' must not leak into the config mapping -- it is
        # always truthy, which made the assert below unreachable and put a
        # bogus 'self' key into the configuration.
        kwargs.pop("self")
        kwargs.pop("conn_args")
        kwargs = {k: v for k, v in kwargs.items() if v}
        assert kwargs or conn_args, "Specify either conn_args or one of the kwargs"
        # Copy so a caller-owned conn_args mapping is never mutated.
        config = dict(conn_args) if conn_args else kwargs
        # BUG FIX: extra_engine_args was accepted but silently discarded;
        # merge it into the auto-specced engine arguments (caller wins).
        extra_engine_args = config.pop("extra_engine_args", None) or {}
        uri, engine_args = self._get_connection_conf(config)
        engine_args.update(extra_engine_args)
        self.engine = create_engine(uri, **engine_args)
        self._Session = sessionmaker(
            autocommit=False,
            autoflush=False,
            expire_on_commit=False,
            bind=self.engine,
        )

    # noinspection PyMethodMayBeStatic
    def _get_connection_conf(self, config) -> Tuple[str, dict]:
        """
        Automatically performs optimal parametrization of the SQLAlchemy engine.\n
        If this functionality is not desired, a manual override of this function
        can be performed using the 'override_engine' function.

        :return: Tuple containing the SQLAlchemy database uri and engine parameters
        """
        # Make sure that at least the dialect option exists
        assert config.get("dialect", False), "Database dialect selection is mandatory"
        # If host is empty and the dialect is 'sqlite', we know the user wants :memory:
        dialect, host = config.get("dialect"), config.get("host", None)
        engine_args: dict = {"pool_pre_ping": True}
        if dialect == "sqlite":
            engine_args.update({"connect_args": {"check_same_thread": False}})
            if not host:
                engine_args.update({"poolclass": StaticPool})
            uri: str = f"{dialect}:///{host if host else ':memory:'}"
        # This is the option for all other database types, such as Postgres or MySQL
        else:
            username = config.get("username")
            password = config.get("password")
            port = config.get("port")
            # BUG FIX: the keyword-argument path stores the name under
            # 'database_name', but only 'database' was read, so the URI always
            # ended in '/None'.  Accept both spellings.
            database = config.get("database") or config.get("database_name")
            if not username and not port:
                raise ValueError("Credentials and / or port missing")
            # BUG FIX: server-based SQLAlchemy URLs use '://', not ':///'
            # (the extra slash corrupted the authority part of the URL).
            uri: str = f"{dialect}://{username}:{password}@{host}:{port}/{database}"
        return uri, engine_args

    def override_engine(self, uri, **kwargs):
        """
        Provides an option to manually override the auto-specced engine.\n
        :param uri: Database URI
        :param kwargs: Normal kwargs as provided to the create_engine factory
        """
        self.engine = create_engine(uri, **kwargs)

    @contextmanager
    def Session(self) -> Iterator[sqlaSession]:
        """ Provides access to a scoped ORM Session """
        session: sqlaSession = self._Session()  # noqa
        try:
            yield session
            session.commit()
        except Exception:
            # In case there is an error during execution we rollback,
            # so no corrupted data reaches the database or fills the cache
            session.rollback()
            # Bare re-raise preserves the original traceback unchanged.
            raise
        finally:
            # Always close the session after operations
            session.close()

    def create(self, base_override=None) -> None:
        """ Creates all tables in the current Base """
        base = base_override or Base
        self.logger.info(f"Constructing Database: {str(self.engine.url)}")
        for table in base.metadata.sorted_tables:
            self.logger.info(f"    {table.name}")
        base.metadata.create_all(bind=self.engine)

    def destroy(self, base_override=None) -> None:
        """ Destroys all tables in the current Base """
        base = base_override or Base
        base.metadata.drop_all(bind=self.engine)


__all__ = ["Database"]
#!/usr/bin/env python
# SPDX-License-Identifier: Apache-2.0

import os
import io

from onnx import defs, load, AttributeProto
from onnx.backend.test.case import collect_snippets
from onnx.backend.test.runner import Runner
from onnx.backend.test.loader import load_model_tests

from typing import Any, IO, Sequence, Dict, List


def is_ml(schemas: Sequence[defs.OpSchema]) -> bool:
    """Return True if any schema belongs to the 'ai.onnx.ml' domain."""
    for s in schemas:
        if s.domain == 'ai.onnx.ml':
            return True
    return False


def gen_outlines(f: IO[Any], ml: bool) -> None:
    """Write the report title and the table-of-contents links to *f*."""
    f.write('# Test Coverage Report')
    if ml:
        f.write(' (ONNX-ML Operators)\n')
    else:
        f.write(' (ONNX Core Operators)\n')
    f.write('## Outlines\n')
    f.write('* [Node Test Coverage](#node-test-coverage)\n')
    f.write('* [Model Test Coverage](#model-test-coverage)\n')
    f.write('* [Overall Test Coverage](#overall-test-coverage)\n')


# Populated by gen_node_test_coverage and read by gen_model_test_coverage.
common_covered: Sequence[str] = []
experimental_covered: Sequence[str] = []


def gen_node_test_coverage(schemas: Sequence[defs.OpSchema],
                           f: IO[Any], ml: bool) -> None:
    """Write the per-operator node test coverage section to *f*.

    Splits operators into common/experimental and covered/uncovered buckets
    (random-generator ops are excluded from the totals) and emits summary
    percentages plus collapsible per-operator test snippets.
    """
    global common_covered
    global experimental_covered
    generators = set({
        'Multinomial',
        'RandomNormal',
        'RandomNormalLike',
        'RandomUniform',
        'RandomUniformLike',
    })
    node_tests = collect_snippets()
    common_covered = sorted(s.name for s in schemas
                            if s.name in node_tests
                            and s.support_level == defs.OpSchema.SupportType.COMMON
                            and (s.domain == 'ai.onnx.ml') == ml)
    common_no_cover = sorted(s.name for s in schemas
                             if s.name not in node_tests
                             and s.support_level == defs.OpSchema.SupportType.COMMON
                             and (s.domain == 'ai.onnx.ml') == ml)
    common_generator = sorted(name for name in common_no_cover
                              if name in generators)
    experimental_covered = sorted(s.name for s in schemas
                                  if s.name in node_tests
                                  and s.support_level == defs.OpSchema.SupportType.EXPERIMENTAL
                                  and (s.domain == 'ai.onnx.ml') == ml)
    experimental_no_cover = sorted(s.name for s in schemas
                                   if s.name not in node_tests
                                   and s.support_level == defs.OpSchema.SupportType.EXPERIMENTAL
                                   and (s.domain == 'ai.onnx.ml') == ml)
    experimental_generator = sorted(name for name in experimental_no_cover
                                    if name in generators)
    num_common = len(common_covered) + len(common_no_cover) \
        - len(common_generator)
    num_experimental = len(experimental_covered) + len(experimental_no_cover) \
        - len(experimental_generator)
    f.write('# Node Test Coverage\n')
    f.write('## Summary\n')
    if num_common:
        f.write('Node tests have covered {}/{} ({:.2f}%, {} generators excluded) '
                'common operators.\n\n'.format(
                    len(common_covered), num_common,
                    (len(common_covered) / float(num_common) * 100),
                    len(common_generator)))
    else:
        f.write('Node tests have covered 0/0 (N/A) common operators. \n\n')
    if num_experimental:
        f.write('Node tests have covered {}/{} ({:.2f}%, {} generators excluded) '
                'experimental operators.\n\n'.format(
                    len(experimental_covered), num_experimental,
                    (len(experimental_covered) / float(num_experimental) * 100),
                    len(experimental_generator)))
    else:
        f.write('Node tests have covered 0/0 (N/A) experimental operators.\n\n')
    titles = ['&#x1F49A;Covered Common Operators',
              '&#x1F494;No Cover Common Operators',
              '&#x1F49A;Covered Experimental Operators',
              '&#x1F494;No Cover Experimental Operators',
              ]
    all_lists = [common_covered, common_no_cover,
                 experimental_covered, experimental_no_cover]
    for t in titles:
        # BUG FIX: the inner literals must use single quotes -- nesting
        # double quotes inside a double-quoted f-string is a SyntaxError on
        # every Python before 3.12 (PEP 701).
        f.write(f"* [{t[9:]}](#{t[9:].lower().replace(' ', '-')})\n")
    f.write('\n')
    for t, l in zip(titles, all_lists):
        f.write(f'## {t}\n')
        for s in l:
            f.write(f'### {s}')
            if s in node_tests:
                f.write('\nThere are {} test cases, listed as following:\n'.format(
                    len(node_tests[s])))
                for summary, code in sorted(node_tests[s]):
                    f.write('<details>\n')
                    f.write(f'<summary>{summary}</summary>\n\n')
                    f.write(f'```python\n{code}\n```\n\n')
                    f.write('</details>\n')
            else:
                if s in generators:
                    f.write(' (random generator operator)\n')
                else:
                    f.write(' (call for test cases)\n')
            f.write('\n\n')
        f.write('<br/>\n\n')


# Maps an AttributeProto type tag to the proto field that holds its value;
# replaces a ten-branch if/elif chain with a single table lookup.
_ATTR_FIELD_BY_TYPE = {
    AttributeProto.FLOAT: 'f',
    AttributeProto.INT: 'i',
    AttributeProto.STRING: 's',
    AttributeProto.TENSOR: 't',
    AttributeProto.GRAPH: 'g',
    AttributeProto.FLOATS: 'floats',
    AttributeProto.INTS: 'ints',
    AttributeProto.STRINGS: 'strings',
    AttributeProto.TENSORS: 'tensors',
    AttributeProto.GRAPHS: 'graphs',
}


def gen_model_test_coverage(schemas: Sequence[defs.OpSchema],
                            f: IO[Any], ml: bool) -> None:
    """Write the per-model node/attribute coverage section to *f*.

    Downloads/loads each 'real' model test, counts how many of its nodes
    are exercised by node tests, and lists which schema attributes appear
    (with how many distinct values) per op type.
    """
    f.write('# Model Test Coverage\n')
    # Process schemas
    schema_dict = dict()
    for schema in schemas:
        schema_dict[schema.name] = schema
    # Load models from each model test using Runner.prepare_model_data
    # Need to grab associated nodes
    attrs: Dict[str, Dict[str, List[Any]]] = dict()
    model_paths: List[Any] = []
    for rt in load_model_tests(kind='real'):
        model_dir = Runner.prepare_model_data(rt)
        model_paths.append(os.path.join(model_dir, 'model.onnx'))
    model_paths.sort()
    model_written = False
    for model_pb_path in model_paths:
        model = load(model_pb_path)
        if ml:
            ml_present = False
            for opset in model.opset_import:
                if opset.domain == 'ai.onnx.ml':
                    ml_present = True
            if not ml_present:
                continue
            else:
                model_written = True
        f.write(f'## {model.graph.name}\n')
        # Deconstruct model
        num_covered = 0
        for node in model.graph.node:
            if node.op_type in common_covered or node.op_type in experimental_covered:
                num_covered += 1
            # Add details of which nodes are/aren't covered
            # Iterate through and store each node's attributes
            for attr in node.attribute:
                if node.op_type not in attrs:
                    attrs[node.op_type] = dict()
                if attr.name not in attrs[node.op_type]:
                    attrs[node.op_type][attr.name] = []
                field = _ATTR_FIELD_BY_TYPE.get(attr.type)
                # Unknown/unset types fall through silently, matching the
                # original if/elif chain's behavior.
                if field is not None:
                    value = getattr(attr, field)
                    if value not in attrs[node.op_type][attr.name]:
                        attrs[node.op_type][attr.name].append(value)
        # BUG FIX: the total node count and the covered count were swapped
        # in the format arguments, producing e.g. "has 3 nodes. Of these,
        # 12 are covered".
        f.write('\n{} has {} nodes. Of these, {} are covered by node tests ({}%)\n\n\n'.format(
            model.graph.name, len(model.graph.node), num_covered,
            100.0 * float(num_covered) / float(len(model.graph.node))))
        # Iterate through attrs, print
        f.write('<details>\n')
        f.write('<summary>nodes</summary>\n\n')
        for op in sorted(attrs):
            f.write('<details>\n')
            # Get total number of attributes for node schema
            f.write('<summary>{}: {} out of {} attributes covered</summary>\n\n'
                    .format(op, len(attrs[op].keys()),
                            len(schema_dict[op].attributes)))
            for attribute in sorted(schema_dict[op].attributes):
                if attribute in attrs[op]:
                    f.write(f'{attribute}: {len(attrs[op][attribute])}\n')
                else:
                    f.write(f'{attribute}: 0\n')
            f.write('</details>\n')
        f.write('</details>\n\n\n')
    if not model_written and ml:
        f.write('No model tests present for selected domain\n')


def gen_overall_test_coverage(schemas: Sequence[defs.OpSchema],
                              f: IO[Any], ml: bool) -> None:
    """Placeholder for the overall coverage section."""
    f.write('# Overall Test Coverage\n')
    f.write('## To be filled.\n')


def gen_spdx(f: IO[Any]) -> None:
    """Write the SPDX license header comment."""
    f.write('<!--- SPDX-License-Identifier: Apache-2.0 -->\n')


def main() -> None:
    """Generate TestCoverage.md (and TestCoverage-ml.md when ML ops exist)."""
    base_dir = os.path.dirname(os.path.dirname(os.path.dirname(
        os.path.dirname(os.path.realpath(__file__)))))
    docs_dir = os.path.join(base_dir, 'docs')
    schemas = defs.get_all_schemas()
    has_ml = is_ml(schemas)
    fname = os.path.join(docs_dir, 'TestCoverage.md')
    with open(fname, 'w+', newline='', encoding="utf-8") as f:  # type: ignore
        gen_spdx(f)
        gen_outlines(f, False)
        gen_node_test_coverage(schemas, f, False)
        gen_model_test_coverage(schemas, f, False)
        gen_overall_test_coverage(schemas, f, False)
    if has_ml:
        fname = os.path.join(docs_dir, 'TestCoverage-ml.md')
        with open(fname, 'w+', newline='', encoding="utf-8") as f:  # type: ignore
            gen_spdx(f)
            gen_outlines(f, True)
            gen_node_test_coverage(schemas, f, True)
            gen_model_test_coverage(schemas, f, True)
            gen_overall_test_coverage(schemas, f, True)


if __name__ == '__main__':
    main()
#!/usr/bin/env python # SPDX-License-Identifier: Apache-2.0 import os import io from onnx import defs, load, AttributeProto from onnx.backend.test.case import collect_snippets from onnx.backend.test.runner import Runner from onnx.backend.test.loader import load_model_tests from typing import Any, IO, Sequence, Dict, List def is_ml(schemas: Sequence[defs.OpSchema]) -> bool: for s in schemas: if s.domain == 'ai.onnx.ml': return True return False def gen_outlines(f: IO[Any], ml: bool) -> None: f.write('# Test Coverage Report') if ml: f.write(' (ONNX-ML Operators)\n') else: f.write(' (ONNX Core Operators)\n') f.write('## Outlines\n') f.write('* [Node Test Coverage](#node-test-coverage)\n') f.write('* [Model Test Coverage](#model-test-coverage)\n') f.write('* [Overall Test Coverage](#overall-test-coverage)\n') common_covered: Sequence[str] = [] experimental_covered: Sequence[str] = [] def gen_node_test_coverage(schemas: Sequence[defs.OpSchema], f: IO[Any], ml: bool) -> None: global common_covered global experimental_covered generators = set({ 'Multinomial', 'RandomNormal', 'RandomNormalLike', 'RandomUniform', 'RandomUniformLike', }) node_tests = collect_snippets() common_covered = sorted(s.name for s in schemas if s.name in node_tests and s.support_level == defs.OpSchema.SupportType.COMMON and (s.domain == 'ai.onnx.ml') == ml) common_no_cover = sorted(s.name for s in schemas if s.name not in node_tests and s.support_level == defs.OpSchema.SupportType.COMMON and (s.domain == 'ai.onnx.ml') == ml) common_generator = sorted(name for name in common_no_cover if name in generators) experimental_covered = sorted(s.name for s in schemas if s.name in node_tests and s.support_level == defs.OpSchema.SupportType.EXPERIMENTAL and (s.domain == 'ai.onnx.ml') == ml) experimental_no_cover = sorted(s.name for s in schemas if s.name not in node_tests and s.support_level == defs.OpSchema.SupportType.EXPERIMENTAL and (s.domain == 'ai.onnx.ml') == ml) experimental_generator = sorted(name for 
name in experimental_no_cover if name in generators) num_common = len(common_covered) + len(common_no_cover) \ - len(common_generator) num_experimental = len(experimental_covered) + len(experimental_no_cover) \ - len(experimental_generator) f.write('# Node Test Coverage\n') f.write('## Summary\n') if num_common: f.write('Node tests have covered {}/{} ({:.2f}%, {} generators excluded) ' 'common operators.\n\n'.format( len(common_covered), num_common, (len(common_covered) / float(num_common) * 100), len(common_generator))) else: f.write('Node tests have covered 0/0 (N/A) common operators. \n\n') if num_experimental: f.write('Node tests have covered {}/{} ({:.2f}%, {} generators excluded) ' 'experimental operators.\n\n'.format( len(experimental_covered), num_experimental, (len(experimental_covered) / float(num_experimental) * 100), len(experimental_generator))) else: f.write('Node tests have covered 0/0 (N/A) experimental operators.\n\n') titles = ['&#x1F49A;Covered Common Operators', '&#x1F494;No Cover Common Operators', '&#x1F49A;Covered Experimental Operators', '&#x1F494;No Cover Experimental Operators', ] all_lists = [common_covered, common_no_cover, experimental_covered, experimental_no_cover] for t in titles: f.write(f"* [{t[9:]}](#{t[9:].lower().replace(' ', '-')})\n") f.write('\n') for t, l in zip(titles, all_lists): f.write(f'## {t}\n') for s in l: f.write(f'### {s}') if s in node_tests: f.write('\nThere are {} test cases, listed as following:\n'.format( len(node_tests[s]))) for summary, code in sorted(node_tests[s]): f.write('<details>\n') f.write(f'<summary>{summary}</summary>\n\n') f.write(f'```python\n{code}\n```\n\n') f.write('</details>\n') else: if s in generators: f.write(' (random generator operator)\n') else: f.write(' (call for test cases)\n') f.write('\n\n') f.write('<br/>\n\n') def gen_model_test_coverage(schemas: Sequence[defs.OpSchema], f: IO[Any], ml: bool) -> None: f.write('# Model Test Coverage\n') # Process schemas schema_dict = dict() for 
schema in schemas: schema_dict[schema.name] = schema # Load models from each model test using Runner.prepare_model_data # Need to grab associated nodes attrs: Dict[str, Dict[str, List[Any]]] = dict() model_paths: List[Any] = [] for rt in load_model_tests(kind='real'): model_dir = Runner.prepare_model_data(rt) model_paths.append(os.path.join(model_dir, 'model.onnx')) model_paths.sort() model_written = False for model_pb_path in model_paths: model = load(model_pb_path) if ml: ml_present = False for opset in model.opset_import: if opset.domain == 'ai.onnx.ml': ml_present = True if not ml_present: continue else: model_written = True f.write(f'## {model.graph.name}\n') # Deconstruct model num_covered = 0 for node in model.graph.node: if node.op_type in common_covered or node.op_type in experimental_covered: num_covered += 1 # Add details of which nodes are/aren't covered # Iterate through and store each node's attributes for attr in node.attribute: if node.op_type not in attrs: attrs[node.op_type] = dict() if attr.name not in attrs[node.op_type]: attrs[node.op_type][attr.name] = [] if attr.type == AttributeProto.FLOAT: if attr.f not in attrs[node.op_type][attr.name]: attrs[node.op_type][attr.name].append(attr.f) elif attr.type == AttributeProto.INT: if attr.i not in attrs[node.op_type][attr.name]: attrs[node.op_type][attr.name].append(attr.i) elif attr.type == AttributeProto.STRING: if attr.s not in attrs[node.op_type][attr.name]: attrs[node.op_type][attr.name].append(attr.s) elif attr.type == AttributeProto.TENSOR: if attr.t not in attrs[node.op_type][attr.name]: attrs[node.op_type][attr.name].append(attr.t) elif attr.type == AttributeProto.GRAPH: if attr.g not in attrs[node.op_type][attr.name]: attrs[node.op_type][attr.name].append(attr.g) elif attr.type == AttributeProto.FLOATS: if attr.floats not in attrs[node.op_type][attr.name]: attrs[node.op_type][attr.name].append(attr.floats) elif attr.type == AttributeProto.INTS: if attr.ints not in 
attrs[node.op_type][attr.name]: attrs[node.op_type][attr.name].append(attr.ints) elif attr.type == AttributeProto.STRINGS: if attr.strings not in attrs[node.op_type][attr.name]: attrs[node.op_type][attr.name].append(attr.strings) elif attr.type == AttributeProto.TENSORS: if attr.tensors not in attrs[node.op_type][attr.name]: attrs[node.op_type][attr.name].append(attr.tensors) elif attr.type == AttributeProto.GRAPHS: if attr.graphs not in attrs[node.op_type][attr.name]: attrs[node.op_type][attr.name].append(attr.graphs) f.write('\n{} has {} nodes. Of these, {} are covered by node tests ({}%)\n\n\n'.format( model.graph.name, num_covered, len(model.graph.node), 100.0 * float( num_covered) / float(len(model.graph.node)))) # Iterate through attrs, print f.write('<details>\n') f.write('<summary>nodes</summary>\n\n') for op in sorted(attrs): f.write('<details>\n') # Get total number of attributes for node schema f.write('<summary>{}: {} out of {} attributes covered</summary>\n\n' .format(op, len(attrs[op].keys()), len(schema_dict[op] .attributes))) for attribute in sorted(schema_dict[op].attributes): if attribute in attrs[op]: f.write(f'{attribute}: {len(attrs[op][attribute])}\n') else: f.write(f'{attribute}: 0\n') f.write('</details>\n') f.write('</details>\n\n\n') if not model_written and ml: f.write('No model tests present for selected domain\n') def gen_overall_test_coverage(schemas: Sequence[defs.OpSchema], f: IO[Any], ml: bool) -> None: f.write('# Overall Test Coverage\n') f.write('## To be filled.\n') def gen_spdx(f: IO[Any]) -> None: f.write('<!--- SPDX-License-Identifier: Apache-2.0 -->\n') def main() -> None: base_dir = os.path.dirname(os.path.dirname(os.path.dirname( os.path.dirname(os.path.realpath(__file__))))) docs_dir = os.path.join(base_dir, 'docs') schemas = defs.get_all_schemas() has_ml = is_ml(schemas) fname = os.path.join(docs_dir, 'TestCoverage.md') with open(fname, 'w+', newline='', encoding="utf-8") as f: # type: ignore gen_spdx(f) gen_outlines(f, 
False)
        # Non-ML report: every section is generated with ml=False.
        gen_node_test_coverage(schemas, f, False)
        gen_model_test_coverage(schemas, f, False)
        gen_overall_test_coverage(schemas, f, False)
    if has_ml:
        # When ai.onnx.ml schemas are present, emit a parallel ML-only report.
        fname = os.path.join(docs_dir, 'TestCoverage-ml.md')
        with open(fname, 'w+', newline='', encoding="utf-8") as f:  # type: ignore
            gen_spdx(f)
            gen_outlines(f, True)
            gen_node_test_coverage(schemas, f, True)
            gen_model_test_coverage(schemas, f, True)
            gen_overall_test_coverage(schemas, f, True)


if __name__ == '__main__':
    main()
#!/usr/bin/env python # Copyright 2015-2016 Yelp Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import json import os import shutil import socket import sys import threading import time import uuid from os import execlpe from random import randint from urllib.parse import urlparse import requests from docker import errors from paasta_tools.adhoc_tools import get_default_interactive_config from paasta_tools.cli.cmds.check import makefile_responds_to from paasta_tools.cli.cmds.cook_image import paasta_cook_image from paasta_tools.cli.utils import figure_out_service_name from paasta_tools.cli.utils import get_instance_config from paasta_tools.cli.utils import lazy_choices_completer from paasta_tools.cli.utils import list_instances from paasta_tools.cli.utils import pick_random_port from paasta_tools.generate_deployments_for_service import build_docker_image_name from paasta_tools.long_running_service_tools import get_healthcheck_for_instance from paasta_tools.paasta_execute_docker_command import execute_in_container from paasta_tools.secret_tools import get_secret_provider from paasta_tools.secret_tools import is_secret_ref from paasta_tools.secret_tools import is_shared_secret from paasta_tools.secret_tools import SHARED_SECRET_SERVICE from paasta_tools.tron_tools import parse_time_variables from paasta_tools.utils import _run from paasta_tools.utils import DEFAULT_SOA_DIR from paasta_tools.utils import get_docker_client from paasta_tools.utils import 
get_possible_launched_by_user_variable_from_env from paasta_tools.utils import get_username from paasta_tools.utils import list_clusters from paasta_tools.utils import list_services from paasta_tools.utils import load_system_paasta_config from paasta_tools.utils import NoConfigurationForServiceError from paasta_tools.utils import NoDeploymentsAvailable from paasta_tools.utils import NoDockerImageError from paasta_tools.utils import paasta_print from paasta_tools.utils import PaastaColors from paasta_tools.utils import PaastaNotConfiguredError from paasta_tools.utils import SystemPaastaConfig from paasta_tools.utils import timed_flock from paasta_tools.utils import Timeout from paasta_tools.utils import TimeoutError from paasta_tools.utils import validate_service_instance def parse_date(date_string): return datetime.datetime.strptime(date_string, "%Y-%m-%d") def perform_http_healthcheck(url, timeout): """Returns true if healthcheck on url succeeds, false otherwise :param url: the healthcheck url :param timeout: timeout in seconds :returns: True if healthcheck succeeds within number of seconds specified by timeout, false otherwise """ try: with Timeout(seconds=timeout): try: res = requests.get(url, verify=False) except requests.ConnectionError: return (False, "http request failed: connection failed") except TimeoutError: return (False, "http request timed out after %d seconds" % timeout) if "content-type" in res.headers and "," in res.headers["content-type"]: paasta_print( PaastaColors.yellow( "Multiple content-type headers detected in response." " The Mesos healthcheck system will treat this as a failure!" 
) ) return (False, "http request succeeded, code %d" % res.status_code) # check if response code is valid per https://mesosphere.github.io/marathon/docs/health-checks.html elif res.status_code >= 200 and res.status_code < 400: return (True, "http request succeeded, code %d" % res.status_code) else: return (False, "http request failed, code %s" % str(res.status_code)) def perform_tcp_healthcheck(url, timeout): """Returns true if successfully connects to host and port, false otherwise :param url: the healthcheck url (in the form tcp://host:port) :param timeout: timeout in seconds :returns: True if healthcheck succeeds within number of seconds specified by timeout, false otherwise """ url_elem = urlparse(url) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(timeout) result = sock.connect_ex((url_elem.hostname, url_elem.port)) sock.close() if result == 0: return (True, "tcp connection succeeded") else: return (False, "%s (timeout %d seconds)" % (os.strerror(result), timeout)) def perform_cmd_healthcheck(docker_client, container_id, command, timeout): """Returns true if return code of command is 0 when executed inside container, false otherwise :param docker_client: Docker client object :param container_id: Docker container id :param command: command to execute :param timeout: timeout in seconds :returns: True if command exits with return code 0, false otherwise """ (output, return_code) = execute_in_container( docker_client, container_id, command, timeout ) if return_code == 0: return (True, output) else: return (False, output) def run_healthcheck_on_container( docker_client, container_id, healthcheck_mode, healthcheck_data, timeout ): """Performs healthcheck on a container :param container_id: Docker container id :param healthcheck_mode: one of 'http', 'https', 'tcp', or 'cmd' :param healthcheck_data: a URL when healthcheck_mode is 'http[s]' or 'tcp', a command if healthcheck_mode is 'cmd' :param timeout: timeout in seconds for individual check 
:returns: a tuple of (bool, output string) """ healthcheck_result = (False, "unknown") if healthcheck_mode == "cmd": healthcheck_result = perform_cmd_healthcheck( docker_client, container_id, healthcheck_data, timeout ) elif healthcheck_mode == "http" or healthcheck_mode == "https": healthcheck_result = perform_http_healthcheck(healthcheck_data, timeout) elif healthcheck_mode == "tcp": healthcheck_result = perform_tcp_healthcheck(healthcheck_data, timeout) else: paasta_print( PaastaColors.yellow( "Healthcheck mode '%s' is not currently supported!" % healthcheck_mode ) ) sys.exit(1) return healthcheck_result def simulate_healthcheck_on_service( instance_config, docker_client, container_id, healthcheck_mode, healthcheck_data, healthcheck_enabled, ): """Simulates Marathon-style healthcheck on given service if healthcheck is enabled :param instance_config: service manifest :param docker_client: Docker client object :param container_id: Docker container id :param healthcheck_data: tuple url to healthcheck :param healthcheck_enabled: boolean :returns: healthcheck_passed: boolean """ healthcheck_link = PaastaColors.cyan(healthcheck_data) if healthcheck_enabled: grace_period = instance_config.get_healthcheck_grace_period_seconds() timeout = instance_config.get_healthcheck_timeout_seconds() interval = instance_config.get_healthcheck_interval_seconds() max_failures = instance_config.get_healthcheck_max_consecutive_failures() paasta_print( "\nStarting health check via %s (waiting %s seconds before " "considering failures due to grace period):" % (healthcheck_link, grace_period) ) # silently start performing health checks until grace period ends or first check succeeds graceperiod_end_time = time.time() + grace_period after_grace_period_attempts = 0 healthchecking = True def _stream_docker_logs(container_id, generator): while healthchecking: try: # the generator will block until another log line is available log_line = next(generator).decode("utf-8").rstrip("\n") if 
healthchecking: paasta_print(f"container [{container_id[:12]}]: {log_line}") else: # stop streaming at first opportunity, since generator.close() # cant be used until the container is dead break except StopIteration: # natural end of logs break docker_logs_generator = docker_client.logs( container_id, stderr=True, stream=True ) threading.Thread( target=_stream_docker_logs, daemon=True, args=(container_id, docker_logs_generator), ).start() while True: # First inspect the container for early exits container_state = docker_client.inspect_container(container_id) if not container_state["State"]["Running"]: paasta_print( PaastaColors.red( "Container exited with code {}".format( container_state["State"]["ExitCode"] ) ) ) healthcheck_passed = False break healthcheck_passed, healthcheck_output = run_healthcheck_on_container( docker_client, container_id, healthcheck_mode, healthcheck_data, timeout ) # Yay, we passed the healthcheck if healthcheck_passed: paasta_print( "{}'{}' (via {})".format( PaastaColors.green("Healthcheck succeeded!: "), healthcheck_output, healthcheck_link, ) ) break # Otherwise, print why we failed if time.time() < graceperiod_end_time: color = PaastaColors.grey msg = "(disregarded due to grace period)" extra_msg = f" (via: {healthcheck_link}. Output: {healthcheck_output})" else: # If we've exceeded the grace period, we start incrementing attempts after_grace_period_attempts += 1 color = PaastaColors.red msg = "(Attempt {} of {})".format( after_grace_period_attempts, max_failures ) extra_msg = f" (via: {healthcheck_link}. Output: {healthcheck_output})" paasta_print("{}{}".format(color(f"Healthcheck failed! 
{msg}"), extra_msg)) if after_grace_period_attempts == max_failures: break time.sleep(interval) healthchecking = False # end docker logs stream else: paasta_print( "\nPaaSTA would have healthchecked your service via\n%s" % healthcheck_link ) healthcheck_passed = True return healthcheck_passed def read_local_dockerfile_lines(): dockerfile = os.path.join(os.getcwd(), "Dockerfile") return open(dockerfile).readlines() def add_subparser(subparsers): list_parser = subparsers.add_parser( "local-run", help="Run service's Docker image locally", description=( "'paasta local-run' is useful for simulating how a PaaSTA service would be " "executed on a real cluster. It analyzes the local soa-configs and constructs " "a 'docker run' invocation to match. This is useful as a type of end-to-end " "test, ensuring that a service will work inside the docker container as expected. " "Additionally, 'local-run' can healthcheck a service per the configured healthcheck.\n\n" "Alternatively, 'local-run' can be used with --pull, which will pull the currently " "deployed docker image and use it, instead of building one." ), epilog=( "Note: 'paasta local-run' uses docker commands, which may require elevated privileges " "to run (sudo)." ), ) list_parser.add_argument( "-s", "--service", help="The name of the service you wish to inspect" ).completer = lazy_choices_completer(list_services) list_parser.add_argument( "-c", "--cluster", help=( "The name of the cluster you wish to simulate. 
" "If omitted, uses the default cluster defined in the paasta local-run configs" ), ).completer = lazy_choices_completer(list_clusters) list_parser.add_argument( "-y", "--yelpsoa-config-root", dest="yelpsoa_config_root", help="A directory from which yelpsoa-configs should be read from", default=DEFAULT_SOA_DIR, ) build_pull_group = list_parser.add_mutually_exclusive_group() build_pull_group.add_argument( "-b", "--build", help=( "Build the docker image to run from scratch using the local Makefile's " "'cook-image' target. Defaults to try to use the local Makefile if present." ), action="store_const", const="build", dest="action", ) build_pull_group.add_argument( "-p", "--pull", help=( "Pull the docker image marked for deployment from the Docker registry and " "use that for the local-run. This is the opposite of --build." ), action="store_const", const="pull", dest="action", ) build_pull_group.add_argument( "-d", "--dry-run", help="Shows the arguments supplied to docker as json.", action="store_const", const="dry_run", dest="action", ) build_pull_group.set_defaults(action="build") list_parser.add_argument( "--json-dict", help="When running dry run, output the arguments as a json dict", action="store_true", dest="dry_run_json_dict", ) list_parser.add_argument( "-C", "--cmd", help=( "Run Docker container with particular command, " 'for example: "bash". By default will use the command or args specified by the ' "soa-configs or what was specified in the Dockerfile" ), required=False, default=None, ) list_parser.add_argument( "-i", "--instance", help=( "Simulate a docker run for a particular instance of the service, like 'main' or 'canary'. " "NOTE: if you don't specify an instance, PaaSTA will run in interactive mode" ), required=False, default=None, ).completer = lazy_choices_completer(list_instances) list_parser.add_argument( "--date", default=datetime.datetime.today().strftime("%Y-%m-%d"), help="Date to use for interpolating date variables in a job. 
Defaults to use %(default)s.", type=parse_date, ) list_parser.add_argument( "-v", "--verbose", help="Show Docker commands output", action="store_true", required=False, default=True, ) list_parser.add_argument( "-I", "--interactive", help=( 'Run container in interactive mode. If interactive is set the default command will be "bash" ' 'unless otherwise set by the "--cmd" flag' ), action="store_true", required=False, default=False, ) list_parser.add_argument( "-k", "--no-healthcheck", help="Disable simulated healthcheck", dest="healthcheck", action="store_false", required=False, default=True, ) list_parser.add_argument( "-t", "--healthcheck-only", help="Terminates container after healthcheck (exits with status code 0 on success, 1 otherwise)", dest="healthcheck_only", action="store_true", required=False, default=False, ) list_parser.add_argument( "-o", "--port", help="Specify a port number to use. If not set, a random non-conflicting port will be found.", type=int, dest="user_port", required=False, default=False, ) list_parser.add_argument( "--vault-auth-method", help="Override how we auth with vault, defaults to token if not present", type=str, dest="vault_auth_method", required=False, default="token", choices=["token", "ldap"], ) list_parser.add_argument( "--vault-token-file", help="Override vault token file, defaults to %(default)s", type=str, dest="vault_token_file", required=False, default="/var/spool/.paasta_vault_token", ) list_parser.add_argument( "--skip-secrets", help="Skip decrypting secrets, useful if running non-interactively", dest="skip_secrets", required=False, action="store_true", default=False, ) list_parser.add_argument( "--sha", help=( "SHA to run instead of the currently marked-for-deployment SHA. Ignored when used with --build." " Must be a version that exists in the registry, i.e. it has been built by Jenkins." 
), type=str, dest="sha", required=False, default=None, ) list_parser.set_defaults(command=paasta_local_run) def get_container_name(): return "paasta_local_run_{}_{}".format(get_username(), randint(1, 999999)) def get_docker_run_cmd( memory, chosen_port, container_port, container_name, volumes, env, interactive, docker_hash, command, net, docker_params, detach, ): cmd = ["paasta_docker_wrapper", "run"] for k in env.keys(): cmd.append("--env") cmd.append(f"{k}") cmd.append("--memory=%dm" % memory) for i in docker_params: cmd.append(f"--{i["key"]}={i["value"]}") if net == "bridge" and container_port is not None: cmd.append("--publish=%d:%d" % (chosen_port, container_port)) elif net == "host": cmd.append("--net=host") cmd.append("--name=%s" % container_name) for volume in volumes: cmd.append("--volume=%s" % volume) if interactive: cmd.append("--interactive=true") if sys.stdin.isatty(): cmd.append("--tty=true") else: if detach: cmd.append("--detach=true") cmd.append("%s" % docker_hash) if command: if isinstance(command, str): cmd.extend(("sh", "-c", command)) else: cmd.extend(command) return cmd class LostContainerException(Exception): pass def docker_pull_image(docker_url): """Pull an image via ``docker pull``. Uses the actual pull command instead of the python bindings due to the docker auth/registry transition. Once we are past Docker 1.6 we can use better credential management, but for now this function assumes the user running the command has already been authorized for the registry""" paasta_print( "Please wait while the image (%s) is pulled (times out after 30m)..." % docker_url, file=sys.stderr, ) DEVNULL = open(os.devnull, "wb") with open("/tmp/paasta-local-run-pull.lock", "w") as f: with timed_flock(f, seconds=1800): ret, output = _run( "docker pull %s" % docker_url, stream=True, stdin=DEVNULL ) if ret != 0: paasta_print( "\nPull failed. 
Are you authorized to run docker commands?", file=sys.stderr, ) sys.exit(ret) def get_container_id(docker_client, container_name): """Use 'docker_client' to find the container we started, identifiable by its 'container_name'. If we can't find the id, raise LostContainerException. """ containers = docker_client.containers(all=False) for container in containers: if "/%s" % container_name in container.get("Names", []): return container.get("Id") raise LostContainerException( "Can't find the container I just launched so I can't do anything else.\n" "Try docker 'ps --all | grep %s' to see where it went.\n" "Here were all the containers:\n" "%s" % (container_name, containers) ) def _cleanup_container(docker_client, container_id): if docker_client.inspect_container(container_id)["State"].get("OOMKilled", False): paasta_print( PaastaColors.red( "Your service was killed by the OOM Killer!\n" "You've exceeded the memory limit, try increasing the mem parameter in your soa_configs" ), file=sys.stderr, ) paasta_print("\nStopping and removing the old container %s..." % container_id) paasta_print("(Please wait or you may leave an orphaned container.)") try: docker_client.stop(container_id) docker_client.remove_container(container_id) paasta_print("...done") except errors.APIError: paasta_print( PaastaColors.yellow( "Could not clean up container! You should stop and remove container '%s' manually." 
% container_id ) ) def get_local_run_environment_vars(instance_config, port0, framework): """Returns a dictionary of environment variables to simulate what would be available to a paasta service running in a container""" hostname = socket.getfqdn() docker_image = instance_config.get_docker_image() if docker_image == "": # In a local_run environment, the docker_image may not be available # so we can fall-back to the injected DOCKER_TAG per the paasta contract docker_image = os.environ["DOCKER_TAG"] fake_taskid = uuid.uuid4() env = { "HOST": hostname, "MESOS_SANDBOX": "/mnt/mesos/sandbox", "MESOS_CONTAINER_NAME": "localrun-%s" % fake_taskid, "MESOS_TASK_ID": str(fake_taskid), "PAASTA_DOCKER_IMAGE": docker_image, "PAASTA_LAUNCHED_BY": get_possible_launched_by_user_variable_from_env(), } if framework == "marathon": env["MARATHON_PORT"] = str(port0) env["MARATHON_PORT0"] = str(port0) env["MARATHON_PORTS"] = str(port0) env["MARATHON_PORT_%d" % instance_config.get_container_port()] = str(port0) env["MARATHON_APP_VERSION"] = "simulated_marathon_app_version" env["MARATHON_APP_RESOURCE_CPUS"] = str(instance_config.get_cpus()) env["MARATHON_APP_DOCKER_IMAGE"] = docker_image env["MARATHON_APP_RESOURCE_MEM"] = str(instance_config.get_mem()) env["MARATHON_APP_RESOURCE_DISK"] = str(instance_config.get_disk()) env["MARATHON_APP_LABELS"] = "" env["MARATHON_APP_ID"] = "/simulated_marathon_app_id" env["MARATHON_HOST"] = hostname return env def check_if_port_free(port): temp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: temp_socket.bind(("127.0.0.1", port)) except socket.error: return False finally: temp_socket.close() return True def decrypt_secret_environment_variables( secret_provider_name, environment, soa_dir, service_name, cluster_name, secret_provider_kwargs, ): decrypted_secrets = {} service_secret_env = {} shared_secret_env = {} for k, v in environment.items(): if is_secret_ref(v): if is_shared_secret(v): shared_secret_env[k] = v else: service_secret_env[k] 
= v provider_args = { "secret_provider_name": secret_provider_name, "soa_dir": soa_dir, "cluster_name": cluster_name, "secret_provider_kwargs": secret_provider_kwargs, } secret_provider_kwargs["vault_num_uses"] = len(service_secret_env) + len( shared_secret_env ) try: decrypted_secrets.update( decrypt_secret_environment_for_service( service_secret_env, service_name, **provider_args ) ) decrypted_secrets.update( decrypt_secret_environment_for_service( shared_secret_env, SHARED_SECRET_SERVICE, **provider_args ) ) except Exception as e: paasta_print(f"Failed to retrieve secrets with {e.__class__.__name__}: {e}") paasta_print( "If you don't need the secrets for local-run, you can add --skip-secrets" ) sys.exit(1) return decrypted_secrets def decrypt_secret_environment_for_service( secret_env_vars, service_name, secret_provider_name, soa_dir, cluster_name, secret_provider_kwargs, ): if not secret_env_vars: return {} secret_provider = get_secret_provider( secret_provider_name=secret_provider_name, soa_dir=soa_dir, service_name=service_name, cluster_names=[cluster_name], secret_provider_kwargs=secret_provider_kwargs, ) return secret_provider.decrypt_environment(secret_env_vars) def run_docker_container( docker_client, service, instance, docker_url, volumes, interactive, command, healthcheck, healthcheck_only, user_port, instance_config, secret_provider_name, soa_dir=DEFAULT_SOA_DIR, dry_run=False, json_dict=False, framework=None, secret_provider_kwargs={}, skip_secrets=False, ): """docker-py has issues running a container with a TTY attached, so for consistency we execute 'docker run' directly in both interactive and non-interactive modes. In non-interactive mode when the run is complete, stop the container and remove it (with docker-py). 
""" if user_port: if check_if_port_free(user_port): chosen_port = user_port else: paasta_print( PaastaColors.red( "The chosen port is already in use!\n" "Try specifying another one, or omit (--port|-o) and paasta will find a free one for you" ), file=sys.stderr, ) sys.exit(1) else: chosen_port = pick_random_port(service) environment = instance_config.get_env_dictionary() if not skip_secrets: secret_environment = decrypt_secret_environment_variables( secret_provider_name=secret_provider_name, environment=environment, soa_dir=soa_dir, service_name=service, cluster_name=instance_config.cluster, secret_provider_kwargs=secret_provider_kwargs, ) environment.update(secret_environment) local_run_environment = get_local_run_environment_vars( instance_config=instance_config, port0=chosen_port, framework=framework ) environment.update(local_run_environment) net = instance_config.get_net() memory = instance_config.get_mem() container_name = get_container_name() docker_params = instance_config.format_docker_parameters() healthcheck_mode, healthcheck_data = get_healthcheck_for_instance( service, instance, instance_config, chosen_port, soa_dir=soa_dir ) if healthcheck_mode is None: container_port = None interactive = True elif not user_port and not healthcheck and not healthcheck_only: container_port = None else: try: container_port = instance_config.get_container_port() except AttributeError: container_port = None simulate_healthcheck = ( healthcheck_only or healthcheck ) and healthcheck_mode is not None docker_run_args = dict( memory=memory, chosen_port=chosen_port, container_port=container_port, container_name=container_name, volumes=volumes, env=environment, interactive=interactive, detach=simulate_healthcheck, docker_hash=docker_url, command=command, net=net, docker_params=docker_params, ) docker_run_cmd = get_docker_run_cmd(**docker_run_args) joined_docker_run_cmd = " ".join(docker_run_cmd) if dry_run: if json_dict: paasta_print(json.dumps(docker_run_args)) else: 
paasta_print(json.dumps(docker_run_cmd)) return 0 else: paasta_print( "Running docker command:\n%s" % PaastaColors.grey(joined_docker_run_cmd) ) merged_env = {**os.environ, **environment} if interactive or not simulate_healthcheck: # NOTE: This immediately replaces us with the docker run cmd. Docker # run knows how to clean up the running container in this situation. wrapper_path = shutil.which("paasta_docker_wrapper") # To properly simulate mesos, we pop the PATH, which is not available to # The executor merged_env.pop("PATH") execlpe(wrapper_path, *docker_run_cmd, merged_env) # For testing, when execlpe is patched out and doesn't replace us, we # still want to bail out. return 0 container_started = False container_id = None try: (returncode, output) = _run(docker_run_cmd, env=merged_env) if returncode != 0: paasta_print( "Failure trying to start your container!" "Returncode: %d" "Output:" "%s" "" "Fix that problem and try again." "http://y/paasta-troubleshooting" % (returncode, output), sep="\n", ) # Container failed to start so no need to cleanup; just bail. sys.exit(1) container_started = True container_id = get_container_id(docker_client, container_name) paasta_print("Found our container running with CID %s" % container_id) if simulate_healthcheck: healthcheck_result = simulate_healthcheck_on_service( instance_config=instance_config, docker_client=docker_client, container_id=container_id, healthcheck_mode=healthcheck_mode, healthcheck_data=healthcheck_data, healthcheck_enabled=healthcheck, ) def _output_exit_code(): returncode = docker_client.inspect_container(container_id)["State"][ "ExitCode" ] paasta_print(f"Container exited: {returncode})") if healthcheck_only: if container_started: _output_exit_code() _cleanup_container(docker_client, container_id) if healthcheck_mode is None: paasta_print( "--healthcheck-only, but no healthcheck is defined for this instance!" 
# NOTE(review): this span begins mid-way through run_docker_container(); the
# opening of that function is not visible here (a second, complete copy of
# this same tail appears later in the chunk — the chunk wraps around).
                )
                sys.exit(1)
            elif healthcheck_result is True:
                sys.exit(0)
            else:
                sys.exit(1)

        running = docker_client.inspect_container(container_id)["State"]["Running"]
        if running:
            paasta_print("Your service is now running! Tailing stdout and stderr:")
            for line in docker_client.attach(
                container_id, stderr=True, stream=True, logs=True
            ):
                paasta_print(line)
        else:
            _output_exit_code()
            returncode = 3
    except KeyboardInterrupt:
        returncode = 3

    # Cleanup if the container exits on its own or interrupted.
    if container_started:
        returncode = docker_client.inspect_container(container_id)["State"]["ExitCode"]
        _cleanup_container(docker_client, container_id)
    return returncode


def format_command_for_type(command, instance_type, date):
    """
    Given an instance_type, return a function that appropriately formats
    the command to be run.
    """
    # Only tron jobs interpolate date variables (e.g. {shortdate}) into the
    # command; every other instance type runs the configured command verbatim.
    if instance_type == "tron":
        interpolated_command = parse_time_variables(command, date)
        return interpolated_command
    else:
        return command


def configure_and_run_docker_container(
    docker_client,
    docker_url,
    docker_sha,
    service,
    instance,
    cluster,
    system_paasta_config,
    args,
    pull_image=False,
    dry_run=False,
):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.
    """
    # --healthcheck-only is meaningless without a concrete instance to check.
    if instance is None and args.healthcheck_only:
        paasta_print(
            "With --healthcheck-only, --instance MUST be provided!", file=sys.stderr
        )
        return 1
    # Without a tty we cannot fall back to an interactive session, so the
    # instance must be explicit.
    if instance is None and not sys.stdin.isatty():
        paasta_print(
            "--instance and --cluster must be specified when using paasta local-run without a tty!",
            file=sys.stderr,
        )
        return 1

    soa_dir = args.yelpsoa_config_root
    volumes = list()
    # Deployments are only needed when we have to resolve the image ourselves
    # (no explicit URL/sha was given or we intend to pull).
    load_deployments = (docker_url is None or pull_image) and not docker_sha
    interactive = args.interactive

    try:
        if instance is None:
            # No instance given: synthesize an interactive adhoc instance.
            instance_type = "adhoc"
            instance = "interactive"
            instance_config = get_default_interactive_config(
                service=service,
                cluster=cluster,
                soa_dir=soa_dir,
                load_deployments=load_deployments,
            )
            interactive = True
        else:
            instance_type = validate_service_instance(
                service, instance, cluster, soa_dir
            )
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=load_deployments,
                soa_dir=soa_dir,
            )
    except NoConfigurationForServiceError as e:
        paasta_print(str(e), file=sys.stderr)
        return 1
    except NoDeploymentsAvailable:
        paasta_print(
            PaastaColors.red(
                "Error: No deployments.json found in %(soa_dir)s/%(service)s. "
                "You can generate this by running: "
                "generate_deployments_for_service -d %(soa_dir)s -s %(service)s"
                % {"soa_dir": soa_dir, "service": service}
            ),
            sep="\n",
            file=sys.stderr,
        )
        return 1

    if docker_sha is not None:
        # Fake up a branch_dict so downstream code resolves the requested sha
        # instead of the currently marked-for-deployment one.
        instance_config.branch_dict = {
            "git_sha": docker_sha,
            "docker_image": build_docker_image_name(service=service, sha=docker_sha),
            "desired_state": "start",
            "force_bounce": None,
        }

    if docker_url is None:
        try:
            docker_url = instance_config.get_docker_url()
        except NoDockerImageError:
            if instance_config.get_deploy_group() is None:
                paasta_print(
                    PaastaColors.red(
                        f"Error: {service}.{instance} has no 'deploy_group' set. Please set one so "
                        "the proper image can be used to run for this service."
                    ),
                    sep="",
                    file=sys.stderr,
                )
            else:
                paasta_print(
                    PaastaColors.red(
                        "Error: No sha has been marked for deployment for the %s deploy group.\n"
                        "Please ensure this service has either run through a jenkins pipeline "
                        "or paasta mark-for-deployment has been run for %s\n"
                        % (instance_config.get_deploy_group(), service)
                    ),
                    sep="",
                    file=sys.stderr,
                )
            return 1

    if pull_image:
        docker_pull_image(docker_url)

    # Only bind-mount host paths that actually exist locally; missing paths
    # are skipped with a warning rather than failing the run.
    for volume in instance_config.get_volumes(system_paasta_config.get_volumes()):
        if os.path.exists(volume["hostPath"]):
            volumes.append(
                "{}:{}:{}".format(
                    volume["hostPath"], volume["containerPath"], volume["mode"].lower()
                )
            )
        else:
            paasta_print(
                PaastaColors.yellow(
                    "Warning: Path %s does not exist on this host. Skipping this binding."
                    % volume["hostPath"]
                ),
                file=sys.stderr,
            )

    # Command precedence: interactive default "bash" > explicit --cmd >
    # soa-configs cmd (with tron date interpolation) > soa-configs args.
    if interactive is True and args.cmd is None:
        command = "bash"
    elif args.cmd:
        command = args.cmd
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            command = format_command_for_type(
                command=command_from_config, instance_type=instance_type, date=args.date
            )
        else:
            command = instance_config.get_args()

    secret_provider_kwargs = {
        "vault_cluster_config": system_paasta_config.get_vault_cluster_config(),
        "vault_auth_method": args.vault_auth_method,
        "vault_token_file": args.vault_token_file,
    }

    return run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_url=docker_url,
        volumes=volumes,
        interactive=interactive,
        command=command,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        user_port=args.user_port,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
        json_dict=args.dry_run_json_dict,
        framework=instance_type,
        secret_provider_name=system_paasta_config.get_secret_provider_name(),
        secret_provider_kwargs=secret_provider_kwargs,
        skip_secrets=args.skip_secrets,
    )


def docker_config_available():
    """Return True if the user has readable docker registry credentials.

    Checks both the legacy ~/.dockercfg and the modern ~/.docker/config.json.
    """
    home = os.path.expanduser("~")
    oldconfig = os.path.join(home, ".dockercfg")
    newconfig = os.path.join(home, ".docker", "config.json")
    return (os.path.isfile(oldconfig) and os.access(oldconfig, os.R_OK)) or (
        os.path.isfile(newconfig) and os.access(newconfig, os.R_OK)
    )


def paasta_local_run(args):
    """Entry point for 'paasta local-run' (wired up via set_defaults)."""
    # Pulling needs registry credentials; if we have neither root nor a docker
    # config, re-exec the whole command under sudo (does not return on success).
    if args.action == "pull" and os.geteuid() != 0 and not docker_config_available():
        paasta_print("Re-executing paasta local-run --pull with sudo..")
        os.execvp("sudo", ["sudo", "-H"] + sys.argv)
    if args.action == "build" and not makefile_responds_to("cook-image"):
        paasta_print(
            "A local Makefile with a 'cook-image' target is required for --build",
            file=sys.stderr,
        )
        paasta_print(
            "If you meant to pull the docker image from the registry, explicitly pass --pull",
            file=sys.stderr,
        )
        return 1

    try:
        system_paasta_config = load_system_paasta_config()
    except PaastaNotConfiguredError:
        paasta_print(
            PaastaColors.yellow(
                # NOTE(review): these adjacent literals concatenate with no
                # separating space ("indicatesPaaSTA", "behavethe") and the
                # single resulting argument makes sep="\n" a no-op — the
                # message prints run together. Worth fixing upstream.
                "Warning: Couldn't load config files from '/etc/paasta'. This indicates"
                "PaaSTA is not configured locally on this host, and local-run may not behave"
                "the same way it would behave on a server configured for PaaSTA."
            ),
            sep="\n",
        )
        system_paasta_config = SystemPaastaConfig({"volumes": []}, "/etc/paasta")

    local_run_config = system_paasta_config.get_local_run_config()

    service = figure_out_service_name(args, soa_dir=args.yelpsoa_config_root)
    if args.cluster:
        cluster = args.cluster
    else:
        try:
            cluster = local_run_config["default_cluster"]
        except KeyError:
            paasta_print(
                PaastaColors.red(
                    # NOTE(review): missing space between the two literals —
                    # renders as "...default cluster.Please pass one...".
                    "PaaSTA on this machine has not been configured with a default cluster."
                    "Please pass one to local-run using '-c'."
                ),
                sep="\n",
                file=sys.stderr,
            )
            return 1

    instance = args.instance
    docker_client = get_docker_client()

    docker_sha = None
    docker_url = None

    if args.action == "build":
        # Cook a fresh image locally; honor a pre-set DOCKER_TAG if present.
        default_tag = "paasta-local-run-{}-{}".format(service, get_username())
        docker_url = os.environ.get("DOCKER_TAG", default_tag)
        os.environ["DOCKER_TAG"] = docker_url
        pull_image = False
        cook_return = paasta_cook_image(
            args=None, service=service, soa_dir=args.yelpsoa_config_root
        )
        if cook_return != 0:
            return cook_return
    elif args.action == "dry_run":
        pull_image = False
        docker_url = None
        docker_sha = args.sha
    else:
        pull_image = True
        docker_url = None
        docker_sha = args.sha

    try:
        return configure_and_run_docker_container(
            docker_client=docker_client,
            docker_url=docker_url,
            docker_sha=docker_sha,
            service=service,
            instance=instance,
            cluster=cluster,
            args=args,
            pull_image=pull_image,
            system_paasta_config=system_paasta_config,
            dry_run=args.action == "dry_run",
        )
    except errors.APIError as e:
        paasta_print("Can't run Docker container. Error: %s" % str(e), file=sys.stderr)
        return 1
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import os
import shutil
import socket
import sys
import threading
import time
import uuid
from os import execlpe
from random import randint
from urllib.parse import urlparse

import requests
from docker import errors

from paasta_tools.adhoc_tools import get_default_interactive_config
from paasta_tools.cli.cmds.check import makefile_responds_to
from paasta_tools.cli.cmds.cook_image import paasta_cook_image
from paasta_tools.cli.utils import figure_out_service_name
from paasta_tools.cli.utils import get_instance_config
from paasta_tools.cli.utils import lazy_choices_completer
from paasta_tools.cli.utils import list_instances
from paasta_tools.cli.utils import pick_random_port
from paasta_tools.generate_deployments_for_service import build_docker_image_name
from paasta_tools.long_running_service_tools import get_healthcheck_for_instance
from paasta_tools.paasta_execute_docker_command import execute_in_container
from paasta_tools.secret_tools import get_secret_provider
from paasta_tools.secret_tools import is_secret_ref
from paasta_tools.secret_tools import is_shared_secret
from paasta_tools.secret_tools import SHARED_SECRET_SERVICE
from paasta_tools.tron_tools import parse_time_variables
from paasta_tools.utils import _run
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import get_docker_client
from paasta_tools.utils import get_possible_launched_by_user_variable_from_env
from paasta_tools.utils import get_username
from paasta_tools.utils import list_clusters
from paasta_tools.utils import list_services
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import NoDeploymentsAvailable
from paasta_tools.utils import NoDockerImageError
from paasta_tools.utils import paasta_print
from paasta_tools.utils import PaastaColors
from paasta_tools.utils import PaastaNotConfiguredError
from paasta_tools.utils import SystemPaastaConfig
from paasta_tools.utils import timed_flock
from paasta_tools.utils import Timeout
from paasta_tools.utils import TimeoutError
from paasta_tools.utils import validate_service_instance


def parse_date(date_string):
    """Parse a YYYY-MM-DD string into a datetime (argparse type for --date)."""
    return datetime.datetime.strptime(date_string, "%Y-%m-%d")


def perform_http_healthcheck(url, timeout):
    """Returns true if healthcheck on url succeeds, false otherwise

    :param url: the healthcheck url
    :param timeout: timeout in seconds
    :returns: True if healthcheck succeeds within number of seconds specified by timeout, false otherwise
    """
    try:
        with Timeout(seconds=timeout):
            try:
                res = requests.get(url, verify=False)
            except requests.ConnectionError:
                return (False, "http request failed: connection failed")
    except TimeoutError:
        return (False, "http request timed out after %d seconds" % timeout)

    # Mesos treats multiple content-type headers as a failure, so we mirror
    # that behavior here even though the HTTP request itself succeeded.
    if "content-type" in res.headers and "," in res.headers["content-type"]:
        paasta_print(
            PaastaColors.yellow(
                "Multiple content-type headers detected in response."
                " The Mesos healthcheck system will treat this as a failure!"
            )
        )
        return (False, "http request succeeded, code %d" % res.status_code)
    # check if response code is valid per https://mesosphere.github.io/marathon/docs/health-checks.html
    elif res.status_code >= 200 and res.status_code < 400:
        return (True, "http request succeeded, code %d" % res.status_code)
    else:
        return (False, "http request failed, code %s" % str(res.status_code))


def perform_tcp_healthcheck(url, timeout):
    """Returns true if successfully connects to host and port, false otherwise

    :param url: the healthcheck url (in the form tcp://host:port)
    :param timeout: timeout in seconds
    :returns: True if healthcheck succeeds within number of seconds specified by timeout, false otherwise
    """
    url_elem = urlparse(url)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(timeout)
    # connect_ex returns 0 on success, an errno value otherwise (no exception).
    result = sock.connect_ex((url_elem.hostname, url_elem.port))
    sock.close()
    if result == 0:
        return (True, "tcp connection succeeded")
    else:
        return (False, "%s (timeout %d seconds)" % (os.strerror(result), timeout))


def perform_cmd_healthcheck(docker_client, container_id, command, timeout):
    """Returns true if return code of command is 0 when executed inside container, false otherwise

    :param docker_client: Docker client object
    :param container_id: Docker container id
    :param command: command to execute
    :param timeout: timeout in seconds
    :returns: True if command exits with return code 0, false otherwise
    """
    (output, return_code) = execute_in_container(
        docker_client, container_id, command, timeout
    )
    if return_code == 0:
        return (True, output)
    else:
        return (False, output)


# NOTE(review): the definition below is split by the chunk boundary — its
# docstring closes and its body continues on the next source span.
def run_healthcheck_on_container(
    docker_client, container_id, healthcheck_mode, healthcheck_data, timeout
):
    """Performs healthcheck on a container

    :param container_id: Docker container id
    :param healthcheck_mode: one of 'http', 'https', 'tcp', or 'cmd'
    :param healthcheck_data: a URL when healthcheck_mode is 'http[s]' or 'tcp', a command if healthcheck_mode is 'cmd'
    :param timeout: timeout in seconds for individual check
    :returns: a tuple of (bool, output string)
    """
    # Dispatch to the matching single-shot check; unknown modes are fatal.
    healthcheck_result = (False, "unknown")
    if healthcheck_mode == "cmd":
        healthcheck_result = perform_cmd_healthcheck(
            docker_client, container_id, healthcheck_data, timeout
        )
    elif healthcheck_mode == "http" or healthcheck_mode == "https":
        healthcheck_result = perform_http_healthcheck(healthcheck_data, timeout)
    elif healthcheck_mode == "tcp":
        healthcheck_result = perform_tcp_healthcheck(healthcheck_data, timeout)
    else:
        paasta_print(
            PaastaColors.yellow(
                "Healthcheck mode '%s' is not currently supported!" % healthcheck_mode
            )
        )
        sys.exit(1)
    return healthcheck_result


def simulate_healthcheck_on_service(
    instance_config,
    docker_client,
    container_id,
    healthcheck_mode,
    healthcheck_data,
    healthcheck_enabled,
):
    """Simulates Marathon-style healthcheck on given service if healthcheck is enabled

    :param instance_config: service manifest
    :param docker_client: Docker client object
    :param container_id: Docker container id
    :param healthcheck_data: tuple url to healthcheck
    :param healthcheck_enabled: boolean
    :returns: healthcheck_passed: boolean
    """
    healthcheck_link = PaastaColors.cyan(healthcheck_data)
    if healthcheck_enabled:
        grace_period = instance_config.get_healthcheck_grace_period_seconds()
        timeout = instance_config.get_healthcheck_timeout_seconds()
        interval = instance_config.get_healthcheck_interval_seconds()
        max_failures = instance_config.get_healthcheck_max_consecutive_failures()

        paasta_print(
            "\nStarting health check via %s (waiting %s seconds before "
            "considering failures due to grace period):"
            % (healthcheck_link, grace_period)
        )

        # silently start performing health checks until grace period ends or first check succeeds
        graceperiod_end_time = time.time() + grace_period
        after_grace_period_attempts = 0
        # Shared flag (closed over by the log-streaming thread) that tells it
        # to stop echoing container output once healthchecking is finished.
        healthchecking = True

        def _stream_docker_logs(container_id, generator):
            while healthchecking:
                try:
                    # the generator will block until another log line is available
                    log_line = next(generator).decode("utf-8").rstrip("\n")
                    if healthchecking:
                        paasta_print(f"container [{container_id[:12]}]: {log_line}")
                    else:
                        # stop streaming at first opportunity, since generator.close()
                        # cant be used until the container is dead
                        break
                except StopIteration:  # natural end of logs
                    break

        docker_logs_generator = docker_client.logs(
            container_id, stderr=True, stream=True
        )
        threading.Thread(
            target=_stream_docker_logs,
            daemon=True,
            args=(container_id, docker_logs_generator),
        ).start()

        while True:
            # First inspect the container for early exits
            container_state = docker_client.inspect_container(container_id)
            if not container_state["State"]["Running"]:
                paasta_print(
                    PaastaColors.red(
                        "Container exited with code {}".format(
                            container_state["State"]["ExitCode"]
                        )
                    )
                )
                healthcheck_passed = False
                break

            healthcheck_passed, healthcheck_output = run_healthcheck_on_container(
                docker_client, container_id, healthcheck_mode, healthcheck_data, timeout
            )

            # Yay, we passed the healthcheck
            if healthcheck_passed:
                paasta_print(
                    "{}'{}' (via {})".format(
                        PaastaColors.green("Healthcheck succeeded!: "),
                        healthcheck_output,
                        healthcheck_link,
                    )
                )
                break

            # Otherwise, print why we failed
            if time.time() < graceperiod_end_time:
                color = PaastaColors.grey
                msg = "(disregarded due to grace period)"
                extra_msg = f" (via: {healthcheck_link}. Output: {healthcheck_output})"
            else:
                # If we've exceeded the grace period, we start incrementing attempts
                after_grace_period_attempts += 1
                color = PaastaColors.red
                msg = "(Attempt {} of {})".format(
                    after_grace_period_attempts, max_failures
                )
                extra_msg = f" (via: {healthcheck_link}. Output: {healthcheck_output})"

            # NOTE(review): this statement continues on the next source span
            # (the chunk boundary fell inside the f-string literal).
            paasta_print("{}{}".format(color(f"Healthcheck failed!
# NOTE(review): this span opens mid-way through simulate_healthcheck_on_service()
# (the chunk boundary fell inside an f-string literal on the previous span).
 {msg}"), extra_msg))

            if after_grace_period_attempts == max_failures:
                break

            time.sleep(interval)
        healthchecking = False  # end docker logs stream
    else:
        paasta_print(
            "\nPaaSTA would have healthchecked your service via\n%s" % healthcheck_link
        )
        healthcheck_passed = True
    return healthcheck_passed


def read_local_dockerfile_lines():
    """Return the lines of ./Dockerfile in the current working directory."""
    dockerfile = os.path.join(os.getcwd(), "Dockerfile")
    # NOTE(review): file handle is never closed — a `with open(...)` would be
    # safer, though for a short-lived CLI the leak is harmless.
    return open(dockerfile).readlines()


def add_subparser(subparsers):
    """Register the 'local-run' subcommand and all of its CLI options."""
    list_parser = subparsers.add_parser(
        "local-run",
        help="Run service's Docker image locally",
        description=(
            "'paasta local-run' is useful for simulating how a PaaSTA service would be "
            "executed on a real cluster. It analyzes the local soa-configs and constructs "
            "a 'docker run' invocation to match. This is useful as a type of end-to-end "
            "test, ensuring that a service will work inside the docker container as expected. "
            "Additionally, 'local-run' can healthcheck a service per the configured healthcheck.\n\n"
            "Alternatively, 'local-run' can be used with --pull, which will pull the currently "
            "deployed docker image and use it, instead of building one."
        ),
        epilog=(
            "Note: 'paasta local-run' uses docker commands, which may require elevated privileges "
            "to run (sudo)."
        ),
    )
    list_parser.add_argument(
        "-s", "--service", help="The name of the service you wish to inspect"
    ).completer = lazy_choices_completer(list_services)
    list_parser.add_argument(
        "-c",
        "--cluster",
        help=(
            "The name of the cluster you wish to simulate. "
            "If omitted, uses the default cluster defined in the paasta local-run configs"
        ),
    ).completer = lazy_choices_completer(list_clusters)
    list_parser.add_argument(
        "-y",
        "--yelpsoa-config-root",
        dest="yelpsoa_config_root",
        help="A directory from which yelpsoa-configs should be read from",
        default=DEFAULT_SOA_DIR,
    )
    # --build / --pull / --dry-run are mutually exclusive ways of obtaining
    # (or not running) the image; default is --build.
    build_pull_group = list_parser.add_mutually_exclusive_group()
    build_pull_group.add_argument(
        "-b",
        "--build",
        help=(
            "Build the docker image to run from scratch using the local Makefile's "
            "'cook-image' target. Defaults to try to use the local Makefile if present."
        ),
        action="store_const",
        const="build",
        dest="action",
    )
    build_pull_group.add_argument(
        "-p",
        "--pull",
        help=(
            "Pull the docker image marked for deployment from the Docker registry and "
            "use that for the local-run. This is the opposite of --build."
        ),
        action="store_const",
        const="pull",
        dest="action",
    )
    build_pull_group.add_argument(
        "-d",
        "--dry-run",
        help="Shows the arguments supplied to docker as json.",
        action="store_const",
        const="dry_run",
        dest="action",
    )
    build_pull_group.set_defaults(action="build")
    list_parser.add_argument(
        "--json-dict",
        help="When running dry run, output the arguments as a json dict",
        action="store_true",
        dest="dry_run_json_dict",
    )
    list_parser.add_argument(
        "-C",
        "--cmd",
        help=(
            "Run Docker container with particular command, "
            'for example: "bash". By default will use the command or args specified by the '
            "soa-configs or what was specified in the Dockerfile"
        ),
        required=False,
        default=None,
    )
    list_parser.add_argument(
        "-i",
        "--instance",
        help=(
            "Simulate a docker run for a particular instance of the service, like 'main' or 'canary'. "
            "NOTE: if you don't specify an instance, PaaSTA will run in interactive mode"
        ),
        required=False,
        default=None,
    ).completer = lazy_choices_completer(list_instances)
    list_parser.add_argument(
        "--date",
        default=datetime.datetime.today().strftime("%Y-%m-%d"),
        help="Date to use for interpolating date variables in a job. Defaults to use %(default)s.",
        type=parse_date,
    )
    list_parser.add_argument(
        "-v",
        "--verbose",
        help="Show Docker commands output",
        action="store_true",
        required=False,
        default=True,
    )
    list_parser.add_argument(
        "-I",
        "--interactive",
        help=(
            'Run container in interactive mode. If interactive is set the default command will be "bash" '
            'unless otherwise set by the "--cmd" flag'
        ),
        action="store_true",
        required=False,
        default=False,
    )
    list_parser.add_argument(
        "-k",
        "--no-healthcheck",
        help="Disable simulated healthcheck",
        dest="healthcheck",
        action="store_false",
        required=False,
        default=True,
    )
    list_parser.add_argument(
        "-t",
        "--healthcheck-only",
        help="Terminates container after healthcheck (exits with status code 0 on success, 1 otherwise)",
        dest="healthcheck_only",
        action="store_true",
        required=False,
        default=False,
    )
    list_parser.add_argument(
        "-o",
        "--port",
        help="Specify a port number to use. If not set, a random non-conflicting port will be found.",
        type=int,
        dest="user_port",
        required=False,
        default=False,
    )
    list_parser.add_argument(
        "--vault-auth-method",
        help="Override how we auth with vault, defaults to token if not present",
        type=str,
        dest="vault_auth_method",
        required=False,
        default="token",
        choices=["token", "ldap"],
    )
    list_parser.add_argument(
        "--vault-token-file",
        help="Override vault token file, defaults to %(default)s",
        type=str,
        dest="vault_token_file",
        required=False,
        default="/var/spool/.paasta_vault_token",
    )
    list_parser.add_argument(
        "--skip-secrets",
        help="Skip decrypting secrets, useful if running non-interactively",
        dest="skip_secrets",
        required=False,
        action="store_true",
        default=False,
    )
    list_parser.add_argument(
        "--sha",
        help=(
            "SHA to run instead of the currently marked-for-deployment SHA. Ignored when used with --build."
            " Must be a version that exists in the registry, i.e. it has been built by Jenkins."
        ),
        type=str,
        dest="sha",
        required=False,
        default=None,
    )
    list_parser.set_defaults(command=paasta_local_run)


def get_container_name():
    """Return a unique-ish name for the local-run container (user + random)."""
    return "paasta_local_run_{}_{}".format(get_username(), randint(1, 999999))


def get_docker_run_cmd(
    memory,
    chosen_port,
    container_port,
    container_name,
    volumes,
    env,
    interactive,
    docker_hash,
    command,
    net,
    docker_params,
    detach,
):
    """Assemble the argv list for paasta_docker_wrapper's 'run' invocation."""
    cmd = ["paasta_docker_wrapper", "run"]
    # Pass env var *names* only; the wrapper resolves values from its own
    # environment so secrets never appear in the process arg list.
    for k in env.keys():
        cmd.append("--env")
        cmd.append(f"{k}")
    cmd.append("--memory=%dm" % memory)
    for i in docker_params:
        cmd.append(f"--{i['key']}={i['value']}")
    if net == "bridge" and container_port is not None:
        cmd.append("--publish=%d:%d" % (chosen_port, container_port))
    elif net == "host":
        cmd.append("--net=host")
    cmd.append("--name=%s" % container_name)
    for volume in volumes:
        cmd.append("--volume=%s" % volume)
    if interactive:
        cmd.append("--interactive=true")
        if sys.stdin.isatty():
            cmd.append("--tty=true")
    else:
        if detach:
            cmd.append("--detach=true")
    cmd.append("%s" % docker_hash)
    if command:
        if isinstance(command, str):
            # A string command is run through a shell, matching docker's CMD
            # string semantics.
            cmd.extend(("sh", "-c", command))
        else:
            cmd.extend(command)
    return cmd


class LostContainerException(Exception):
    # Raised when the container we just launched cannot be found again.
    pass


# NOTE(review): the function below is split by the chunk boundary — its body
# continues (mid string literal) on the next source span.
def docker_pull_image(docker_url):
    """Pull an image via ``docker pull``. Uses the actual pull command instead of the
    python bindings due to the docker auth/registry transition. Once we are past Docker 1.6
    we can use better credential management, but for now this function assumes the user
    running the command has already been authorized for the registry"""
    paasta_print(
        "Please wait while the image (%s) is pulled (times out after 30m)..."
        % docker_url,
        file=sys.stderr,
    )
    DEVNULL = open(os.devnull, "wb")
    # Serialize concurrent pulls on this host via a flock with a 30m timeout.
    with open("/tmp/paasta-local-run-pull.lock", "w") as f:
        with timed_flock(f, seconds=1800):
            ret, output = _run(
                "docker pull %s" % docker_url, stream=True, stdin=DEVNULL
            )
            if ret != 0:
                paasta_print(
                    "\nPull failed.
# NOTE(review): continuation of docker_pull_image() from the previous span
# (the chunk boundary fell inside a string literal).
 Are you authorized to run docker commands?",
                    file=sys.stderr,
                )
                sys.exit(ret)


def get_container_id(docker_client, container_name):
    """Use 'docker_client' to find the container we started, identifiable by its
    'container_name'. If we can't find the id, raise
    LostContainerException.
    """
    containers = docker_client.containers(all=False)
    for container in containers:
        # Docker prefixes names with '/' in the API listing.
        if "/%s" % container_name in container.get("Names", []):
            return container.get("Id")
    raise LostContainerException(
        "Can't find the container I just launched so I can't do anything else.\n"
        "Try docker 'ps --all | grep %s' to see where it went.\n"
        "Here were all the containers:\n"
        "%s" % (container_name, containers)
    )


def _cleanup_container(docker_client, container_id):
    """Stop and remove the container, warning first if it was OOM-killed."""
    if docker_client.inspect_container(container_id)["State"].get("OOMKilled", False):
        paasta_print(
            PaastaColors.red(
                "Your service was killed by the OOM Killer!\n"
                "You've exceeded the memory limit, try increasing the mem parameter in your soa_configs"
            ),
            file=sys.stderr,
        )
    paasta_print("\nStopping and removing the old container %s..." % container_id)
    paasta_print("(Please wait or you may leave an orphaned container.)")
    try:
        docker_client.stop(container_id)
        docker_client.remove_container(container_id)
        paasta_print("...done")
    except errors.APIError:
        paasta_print(
            PaastaColors.yellow(
                "Could not clean up container! You should stop and remove container '%s' manually."
                % container_id
            )
        )


def get_local_run_environment_vars(instance_config, port0, framework):
    """Returns a dictionary of environment variables to simulate what would be available
    to a paasta service running in a container"""
    hostname = socket.getfqdn()
    docker_image = instance_config.get_docker_image()
    if docker_image == "":
        # In a local_run environment, the docker_image may not be available
        # so we can fall-back to the injected DOCKER_TAG per the paasta contract
        docker_image = os.environ["DOCKER_TAG"]
    fake_taskid = uuid.uuid4()
    env = {
        "HOST": hostname,
        "MESOS_SANDBOX": "/mnt/mesos/sandbox",
        "MESOS_CONTAINER_NAME": "localrun-%s" % fake_taskid,
        "MESOS_TASK_ID": str(fake_taskid),
        "PAASTA_DOCKER_IMAGE": docker_image,
        "PAASTA_LAUNCHED_BY": get_possible_launched_by_user_variable_from_env(),
    }
    if framework == "marathon":
        # Simulate the MARATHON_* variables a real marathon task would see.
        env["MARATHON_PORT"] = str(port0)
        env["MARATHON_PORT0"] = str(port0)
        env["MARATHON_PORTS"] = str(port0)
        env["MARATHON_PORT_%d" % instance_config.get_container_port()] = str(port0)
        env["MARATHON_APP_VERSION"] = "simulated_marathon_app_version"
        env["MARATHON_APP_RESOURCE_CPUS"] = str(instance_config.get_cpus())
        env["MARATHON_APP_DOCKER_IMAGE"] = docker_image
        env["MARATHON_APP_RESOURCE_MEM"] = str(instance_config.get_mem())
        env["MARATHON_APP_RESOURCE_DISK"] = str(instance_config.get_disk())
        env["MARATHON_APP_LABELS"] = ""
        env["MARATHON_APP_ID"] = "/simulated_marathon_app_id"
        env["MARATHON_HOST"] = hostname
    return env


def check_if_port_free(port):
    """Return True if we can bind 127.0.0.1:port (i.e. the port is free)."""
    temp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        temp_socket.bind(("127.0.0.1", port))
    except socket.error:
        return False
    finally:
        temp_socket.close()
    return True


def decrypt_secret_environment_variables(
    secret_provider_name,
    environment,
    soa_dir,
    service_name,
    cluster_name,
    secret_provider_kwargs,
):
    """Resolve all SECRET(...)/SHARED_SECRET(...) refs in 'environment'.

    Exits the process with status 1 if any secret cannot be decrypted.
    """
    decrypted_secrets = {}
    service_secret_env = {}
    shared_secret_env = {}
    # Partition secret refs by whether they are service-private or shared.
    for k, v in environment.items():
        if is_secret_ref(v):
            if is_shared_secret(v):
                shared_secret_env[k] = v
            else:
                service_secret_env[k] = v
    provider_args = {
        "secret_provider_name": secret_provider_name,
        "soa_dir": soa_dir,
        "cluster_name": cluster_name,
        "secret_provider_kwargs": secret_provider_kwargs,
    }
    # Request one vault token use per secret to be decrypted.
    secret_provider_kwargs["vault_num_uses"] = len(service_secret_env) + len(
        shared_secret_env
    )
    try:
        decrypted_secrets.update(
            decrypt_secret_environment_for_service(
                service_secret_env, service_name, **provider_args
            )
        )
        decrypted_secrets.update(
            decrypt_secret_environment_for_service(
                shared_secret_env, SHARED_SECRET_SERVICE, **provider_args
            )
        )
    except Exception as e:
        paasta_print(f"Failed to retrieve secrets with {e.__class__.__name__}: {e}")
        paasta_print(
            "If you don't need the secrets for local-run, you can add --skip-secrets"
        )
        sys.exit(1)
    return decrypted_secrets


def decrypt_secret_environment_for_service(
    secret_env_vars,
    service_name,
    secret_provider_name,
    soa_dir,
    cluster_name,
    secret_provider_kwargs,
):
    """Decrypt one service's secret env vars via the configured provider."""
    if not secret_env_vars:
        return {}
    secret_provider = get_secret_provider(
        secret_provider_name=secret_provider_name,
        soa_dir=soa_dir,
        service_name=service_name,
        cluster_names=[cluster_name],
        secret_provider_kwargs=secret_provider_kwargs,
    )
    return secret_provider.decrypt_environment(secret_env_vars)


# NOTE(review): the definition below is split by the chunk boundary — its
# docstring closes and its body continues on the next source span.
def run_docker_container(
    docker_client,
    service,
    instance,
    docker_url,
    volumes,
    interactive,
    command,
    healthcheck,
    healthcheck_only,
    user_port,
    instance_config,
    secret_provider_name,
    soa_dir=DEFAULT_SOA_DIR,
    dry_run=False,
    json_dict=False,
    framework=None,
    secret_provider_kwargs={},
    skip_secrets=False,
):
    """docker-py has issues running a container with a TTY attached, so for
    consistency we execute 'docker run' directly in both interactive and
    non-interactive modes.

    In non-interactive mode when the run is complete, stop the container and
    remove it (with docker-py).
""" if user_port: if check_if_port_free(user_port): chosen_port = user_port else: paasta_print( PaastaColors.red( "The chosen port is already in use!\n" "Try specifying another one, or omit (--port|-o) and paasta will find a free one for you" ), file=sys.stderr, ) sys.exit(1) else: chosen_port = pick_random_port(service) environment = instance_config.get_env_dictionary() if not skip_secrets: secret_environment = decrypt_secret_environment_variables( secret_provider_name=secret_provider_name, environment=environment, soa_dir=soa_dir, service_name=service, cluster_name=instance_config.cluster, secret_provider_kwargs=secret_provider_kwargs, ) environment.update(secret_environment) local_run_environment = get_local_run_environment_vars( instance_config=instance_config, port0=chosen_port, framework=framework ) environment.update(local_run_environment) net = instance_config.get_net() memory = instance_config.get_mem() container_name = get_container_name() docker_params = instance_config.format_docker_parameters() healthcheck_mode, healthcheck_data = get_healthcheck_for_instance( service, instance, instance_config, chosen_port, soa_dir=soa_dir ) if healthcheck_mode is None: container_port = None interactive = True elif not user_port and not healthcheck and not healthcheck_only: container_port = None else: try: container_port = instance_config.get_container_port() except AttributeError: container_port = None simulate_healthcheck = ( healthcheck_only or healthcheck ) and healthcheck_mode is not None docker_run_args = dict( memory=memory, chosen_port=chosen_port, container_port=container_port, container_name=container_name, volumes=volumes, env=environment, interactive=interactive, detach=simulate_healthcheck, docker_hash=docker_url, command=command, net=net, docker_params=docker_params, ) docker_run_cmd = get_docker_run_cmd(**docker_run_args) joined_docker_run_cmd = " ".join(docker_run_cmd) if dry_run: if json_dict: paasta_print(json.dumps(docker_run_args)) else: 
paasta_print(json.dumps(docker_run_cmd)) return 0 else: paasta_print( "Running docker command:\n%s" % PaastaColors.grey(joined_docker_run_cmd) ) merged_env = {**os.environ, **environment} if interactive or not simulate_healthcheck: # NOTE: This immediately replaces us with the docker run cmd. Docker # run knows how to clean up the running container in this situation. wrapper_path = shutil.which("paasta_docker_wrapper") # To properly simulate mesos, we pop the PATH, which is not available to # The executor merged_env.pop("PATH") execlpe(wrapper_path, *docker_run_cmd, merged_env) # For testing, when execlpe is patched out and doesn't replace us, we # still want to bail out. return 0 container_started = False container_id = None try: (returncode, output) = _run(docker_run_cmd, env=merged_env) if returncode != 0: paasta_print( "Failure trying to start your container!" "Returncode: %d" "Output:" "%s" "" "Fix that problem and try again." "http://y/paasta-troubleshooting" % (returncode, output), sep="\n", ) # Container failed to start so no need to cleanup; just bail. sys.exit(1) container_started = True container_id = get_container_id(docker_client, container_name) paasta_print("Found our container running with CID %s" % container_id) if simulate_healthcheck: healthcheck_result = simulate_healthcheck_on_service( instance_config=instance_config, docker_client=docker_client, container_id=container_id, healthcheck_mode=healthcheck_mode, healthcheck_data=healthcheck_data, healthcheck_enabled=healthcheck, ) def _output_exit_code(): returncode = docker_client.inspect_container(container_id)["State"][ "ExitCode" ] paasta_print(f"Container exited: {returncode})") if healthcheck_only: if container_started: _output_exit_code() _cleanup_container(docker_client, container_id) if healthcheck_mode is None: paasta_print( "--healthcheck-only, but no healthcheck is defined for this instance!" 
# NOTE(review): this chunk opens mid-function — the indented lines below are the
# tail of a `run_docker_container(...)` definition whose start lies before this
# excerpt (the `try:` matching the `except KeyboardInterrupt:` is not visible here).
            )
            sys.exit(1)
        elif healthcheck_result is True:
            sys.exit(0)
        else:
            sys.exit(1)
        running = docker_client.inspect_container(container_id)["State"]["Running"]
        if running:
            paasta_print("Your service is now running! Tailing stdout and stderr:")
            # Stream the container's combined output until it exits or the user interrupts.
            for line in docker_client.attach(
                container_id, stderr=True, stream=True, logs=True
            ):
                paasta_print(line)
        else:
            _output_exit_code()
            returncode = 3
    except KeyboardInterrupt:
        returncode = 3

    # Cleanup if the container exits on its own or interrupted.
    if container_started:
        # Prefer the container's real exit code over the placeholder set above.
        returncode = docker_client.inspect_container(container_id)["State"]["ExitCode"]
        _cleanup_container(docker_client, container_id)
    return returncode


def format_command_for_type(command, instance_type, date):
    """
    Given an instance_type, return a function that appropriately formats
    the command to be run.
    """
    # Tron commands may embed time variables; interpolate them against the
    # supplied date. Every other instance type runs its command verbatim.
    if instance_type == "tron":
        interpolated_command = parse_time_variables(command, date)
        return interpolated_command
    else:
        return command


def configure_and_run_docker_container(
    docker_client,
    docker_url,
    docker_sha,
    service,
    instance,
    cluster,
    system_paasta_config,
    args,
    pull_image=False,
    dry_run=False,
):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.
    """
    # --healthcheck-only is meaningless without a concrete instance to check.
    if instance is None and args.healthcheck_only:
        paasta_print(
            "With --healthcheck-only, --instance MUST be provided!", file=sys.stderr
        )
        return 1
    # Without a tty we cannot fall back to an interactive session, so an
    # explicit instance/cluster is mandatory.
    if instance is None and not sys.stdin.isatty():
        paasta_print(
            "--instance and --cluster must be specified when using paasta local-run without a tty!",
            file=sys.stderr,
        )
        return 1

    soa_dir = args.yelpsoa_config_root
    volumes = list()
    # Deployments are only needed when we must resolve an image from the
    # registry (no explicit URL/sha was given on the command line).
    load_deployments = (docker_url is None or pull_image) and not docker_sha
    interactive = args.interactive

    try:
        if instance is None:
            # No instance given: fall back to an interactive adhoc session.
            instance_type = "adhoc"
            instance = "interactive"
            instance_config = get_default_interactive_config(
                service=service,
                cluster=cluster,
                soa_dir=soa_dir,
                load_deployments=load_deployments,
            )
            interactive = True
        else:
            instance_type = validate_service_instance(
                service, instance, cluster, soa_dir
            )
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=load_deployments,
                soa_dir=soa_dir,
            )
    except NoConfigurationForServiceError as e:
        paasta_print(str(e), file=sys.stderr)
        return 1
    except NoDeploymentsAvailable:
        paasta_print(
            PaastaColors.red(
                "Error: No deployments.json found in %(soa_dir)s/%(service)s. "
                "You can generate this by running: "
                "generate_deployments_for_service -d %(soa_dir)s -s %(service)s"
                % {"soa_dir": soa_dir, "service": service}
            ),
            sep="\n",
            file=sys.stderr,
        )
        return 1

    # An explicit sha overrides whatever deployments.json would have picked.
    if docker_sha is not None:
        instance_config.branch_dict = {
            "git_sha": docker_sha,
            "docker_image": build_docker_image_name(service=service, sha=docker_sha),
            "desired_state": "start",
            "force_bounce": None,
        }

    if docker_url is None:
        try:
            docker_url = instance_config.get_docker_url()
        except NoDockerImageError:
            if instance_config.get_deploy_group() is None:
                paasta_print(
                    PaastaColors.red(
                        f"Error: {service}.{instance} has no 'deploy_group' set. Please set one so "
                        "the proper image can be used to run for this service."
                    ),
                    sep="",
                    file=sys.stderr,
                )
            else:
                paasta_print(
                    PaastaColors.red(
                        "Error: No sha has been marked for deployment for the %s deploy group.\n"
                        "Please ensure this service has either run through a jenkins pipeline "
                        "or paasta mark-for-deployment has been run for %s\n"
                        % (instance_config.get_deploy_group(), service)
                    ),
                    sep="",
                    file=sys.stderr,
                )
            return 1

    if pull_image:
        docker_pull_image(docker_url)

    # Only bind host paths that actually exist locally; warn and skip the rest.
    for volume in instance_config.get_volumes(system_paasta_config.get_volumes()):
        if os.path.exists(volume["hostPath"]):
            volumes.append(
                "{}:{}:{}".format(
                    volume["hostPath"], volume["containerPath"], volume["mode"].lower()
                )
            )
        else:
            paasta_print(
                PaastaColors.yellow(
                    "Warning: Path %s does not exist on this host. Skipping this binding."
                    % volume["hostPath"]
                ),
                file=sys.stderr,
            )

    # Command resolution precedence: interactive shell > explicit --cmd >
    # configured cmd (time-interpolated for tron) > configured args.
    if interactive is True and args.cmd is None:
        command = "bash"
    elif args.cmd:
        command = args.cmd
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            command = format_command_for_type(
                command=command_from_config, instance_type=instance_type, date=args.date
            )
        else:
            command = instance_config.get_args()

    secret_provider_kwargs = {
        "vault_cluster_config": system_paasta_config.get_vault_cluster_config(),
        "vault_auth_method": args.vault_auth_method,
        "vault_token_file": args.vault_token_file,
    }

    return run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_url=docker_url,
        volumes=volumes,
        interactive=interactive,
        command=command,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        user_port=args.user_port,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
        json_dict=args.dry_run_json_dict,
        framework=instance_type,
        secret_provider_name=system_paasta_config.get_secret_provider_name(),
        secret_provider_kwargs=secret_provider_kwargs,
        skip_secrets=args.skip_secrets,
    )


def docker_config_available():
    """Return True if a readable Docker credentials file exists for this user.

    Checks both the legacy ~/.dockercfg and the modern ~/.docker/config.json.
    """
    home = os.path.expanduser("~")
    oldconfig = os.path.join(home, ".dockercfg")
    newconfig = os.path.join(home, ".docker", "config.json")
    return (os.path.isfile(oldconfig) and os.access(oldconfig, os.R_OK)) or (
        os.path.isfile(newconfig) and os.access(newconfig, os.R_OK)
    )


def paasta_local_run(args):
    """Entry point for `paasta local-run`.

    Builds or pulls the service's Docker image as requested by ``args.action``
    and hands off to :func:`configure_and_run_docker_container`.
    Returns a shell-style exit code (0 on success, non-zero on failure).
    """
    # Pulling may need registry credentials; if we are not root and have no
    # docker config, re-exec the whole command under sudo.
    if args.action == "pull" and os.geteuid() != 0 and not docker_config_available():
        paasta_print("Re-executing paasta local-run --pull with sudo..")
        os.execvp("sudo", ["sudo", "-H"] + sys.argv)
    if args.action == "build" and not makefile_responds_to("cook-image"):
        paasta_print(
            "A local Makefile with a 'cook-image' target is required for --build",
            file=sys.stderr,
        )
        paasta_print(
            "If you meant to pull the docker image from the registry, explicitly pass --pull",
            file=sys.stderr,
        )
        return 1

    try:
        system_paasta_config = load_system_paasta_config()
    except PaastaNotConfiguredError:
        # Degrade gracefully on hosts without /etc/paasta: warn and use an
        # empty config so local-run still works for development.
        paasta_print(
            PaastaColors.yellow(
                "Warning: Couldn't load config files from '/etc/paasta'. This indicates"
                "PaaSTA is not configured locally on this host, and local-run may not behave"
                "the same way it would behave on a server configured for PaaSTA."
            ),
            sep="\n",
        )
        system_paasta_config = SystemPaastaConfig({"volumes": []}, "/etc/paasta")

    local_run_config = system_paasta_config.get_local_run_config()

    service = figure_out_service_name(args, soa_dir=args.yelpsoa_config_root)
    if args.cluster:
        cluster = args.cluster
    else:
        try:
            cluster = local_run_config["default_cluster"]
        except KeyError:
            paasta_print(
                PaastaColors.red(
                    "PaaSTA on this machine has not been configured with a default cluster."
                    "Please pass one to local-run using '-c'."
                ),
                sep="\n",
                file=sys.stderr,
            )
            return 1
    instance = args.instance
    docker_client = get_docker_client()

    docker_sha = None
    docker_url = None

    if args.action == "build":
        # Cook a local image; honor an externally supplied DOCKER_TAG but
        # export the chosen tag so the Makefile's cook-image target sees it.
        default_tag = "paasta-local-run-{}-{}".format(service, get_username())
        docker_url = os.environ.get("DOCKER_TAG", default_tag)
        os.environ["DOCKER_TAG"] = docker_url
        pull_image = False
        cook_return = paasta_cook_image(
            args=None, service=service, soa_dir=args.yelpsoa_config_root
        )
        if cook_return != 0:
            return cook_return
    elif args.action == "dry_run":
        pull_image = False
        docker_url = None
        docker_sha = args.sha
    else:
        pull_image = True
        docker_url = None
        docker_sha = args.sha

    try:
        return configure_and_run_docker_container(
            docker_client=docker_client,
            docker_url=docker_url,
            docker_sha=docker_sha,
            service=service,
            instance=instance,
            cluster=cluster,
            args=args,
            pull_image=pull_image,
            system_paasta_config=system_paasta_config,
            dry_run=args.action == "dry_run",
        )
    except errors.APIError as e:
        paasta_print("Can't run Docker container. Error: %s" % str(e), file=sys.stderr)
        return 1
"""MongoDB model layer: a mongoengine Document base class with a custom
QuerySet, connection bootstrap, unique-constraint enforcement, and a
dict-driven query/stat (aggregation) DSL."""
import bson
import re
import logging
from datetime import datetime
from functools import reduce

from mongoengine import EmbeddedDocumentField, EmbeddedDocument, Document, QuerySet, register_connection
from pymongo import ReadPreference
from mongoengine.errors import *

from spaceone.core import config
from spaceone.core import utils
from spaceone.core.error import *
from spaceone.core.model import BaseModel
from spaceone.core.model.mongo_model.filter_operator import FILTER_OPERATORS
from spaceone.core.model.mongo_model.stat_operator import STAT_OPERATORS

# mongoengine only reports reference-protection failures via message text;
# this pattern extracts the class of the referring document.
_REFERENCE_ERROR_FORMAT = r'Could not delete document \((\w+)\.\w+ refers to it\)'
# Module-level registries so each alias connects / each model indexes only once.
_MONGO_CONNECTIONS = []
_MONGO_INIT_MODELS = []

_LOGGER = logging.getLogger(__name__)


def _raise_reference_error(class_name, message):
    """Re-raise a mongoengine delete-refusal as ERROR_EXIST_RESOURCE if the
    message matches the reference-protection pattern; otherwise return None."""
    m = re.findall(_REFERENCE_ERROR_FORMAT, message)
    if len(m) > 0:
        raise ERROR_EXIST_RESOURCE(parent=class_name, child=m[0])


class MongoCustomQuerySet(QuerySet):
    """QuerySet with bulk mutators that accept dotted keys (``a.b``) and
    translate them into mongoengine's ``__`` field syntax."""

    def last(self):
        # Newest document by primary key.
        return self.order_by('-id').first()

    def update(self, *args, **kwargs):
        # Allow update({'field': value}) in addition to update(field=value).
        if len(args) > 0 and isinstance(args[0], dict):
            kwargs.update(args[0])

        super().update(**kwargs)

    def increment(self, key, amount=1):
        key = key.replace('.', '__')
        inc_data = {
            f'inc__{key}': amount
        }

        super().update(**inc_data)

    def decrement(self, key, amount=1):
        key = key.replace('.', '__')
        dec_data = {
            f'dec__{key}': amount
        }

        super().update(**dec_data)

    def set_data(self, key, data):
        key = key.replace('.', '__')
        set_data = {
            f'set__{key}': data
        }

        super().update(**set_data)

    def unset_data(self, *keys):
        # Remove the given fields from every matched document.
        unset_data = {}

        for key in keys:
            key = key.replace('.', '__')
            unset_data[f'unset__{key}'] = 1

        super().update(**unset_data)

    def append(self, key, data):
        # $push `data` onto the array field at `key`.
        key = key.replace('.', '__')
        append_data = {
            f'push__{key}': data
        }

        super().update(**append_data)

    def remove(self, key, data):
        # $pull `data` from the array field at `key`.
        key = key.replace('.', '__')
        remove_data = {
            f'pull__{key}': data
        }

        super().update(**remove_data)


class MongoModel(Document, BaseModel):
    """Abstract base Document wiring spaceone config-driven connections,
    optional index auto-creation, unique-field enforcement, and the
    dict-based query()/stat() interface."""

    # Defaults; overwritten from global config in init().
    auto_create_index = True
    case_insensitive_index = False

    meta = {
        'abstract': True,
        'queryset_class': MongoCustomQuerySet,
        'auto_create_index': False
    }

    @classmethod
    def init(cls):
        """Connect (unless MOCK_MODE) and create indexes once per model class."""
        global_conf = config.get_global()
        if global_conf.get('MOCK_MODE', False) == False:
            cls.connect()

            if cls not in _MONGO_INIT_MODELS:
                cls.auto_create_index = global_conf.get('DATABASE_AUTO_CREATE_INDEX', True)
                cls.case_insensitive_index = global_conf.get('DATABASE_CASE_INSENSITIVE_INDEX', False)
                cls._create_index()
                _MONGO_INIT_MODELS.append(cls)

    @classmethod
    def connect(cls):
        """Register the mongoengine connection for this model's db_alias
        (idempotent), translating the configured read_preference name into a
        pymongo ReadPreference constant."""
        db_alias = cls._meta.get('db_alias', 'default')
        if db_alias not in _MONGO_CONNECTIONS:
            global_conf = config.get_global()
            databases = global_conf.get('DATABASES', {})

            if db_alias not in databases:
                raise ERROR_DB_CONFIGURATION(backend=db_alias)

            db_conf = databases[db_alias].copy()

            if 'read_preference' in db_conf:
                read_preference = getattr(ReadPreference, db_conf['read_preference'], None)
                if read_preference:
                    db_conf['read_preference'] = read_preference
                else:
                    # Unknown name: drop it rather than pass garbage to pymongo.
                    del db_conf['read_preference']

            register_connection(db_alias, **db_conf)

            _MONGO_CONNECTIONS.append(db_alias)

    @classmethod
    def _create_index(cls):
        """Create the indexes declared in meta; failures are logged, not raised."""
        if cls.auto_create_index:
            indexes = cls._meta.get('indexes', [])

            if len(indexes) > 0:
                _LOGGER.debug(f'Create MongoDB Indexes ({cls.__name__} Model: {len(indexes)} Indexes)')
                for index in indexes:
                    try:
                        if cls.case_insensitive_index:
                            # strength=2 => case-insensitive collation.
                            cls.create_index(index, collation={"locale": "en", "strength": 2})
                        else:
                            cls.create_index(index)
                    except Exception as e:
                        _LOGGER.error(f'Index Creation Failure: {e}')

    @classmethod
    def create(cls, data):
        """Insert a new document from `data`, generating ids / timestamps for
        fields that declare them and enforcing declared unique constraints.
        Raises ERROR_SAVE_UNIQUE_VALUES on a duplicate, ERROR_DB_QUERY on save
        failure."""
        create_data = {}
        unique_fields = []

        for name, field in cls._fields.items():
            if field.unique:
                # unique_with may be a single field name or a list of them.
                if isinstance(field.unique_with, str):
                    unique_fields.append([field.name, field.unique_with])
                elif isinstance(field.unique_with, list):
                    unique_fields.append([field.name] + field.unique_with)
                else:
                    unique_fields.append([field.name])

            if name in data:
                create_data[name] = data[name]
            else:
                # Auto-populate fields not supplied by the caller.
                generate_id = getattr(field, 'generate_id', None)
                if generate_id:
                    create_data[name] = utils.generate_id(generate_id)

                if getattr(field, 'auto_now', False):
                    create_data[name] = datetime.utcnow()
                elif getattr(field, 'auto_now_add', False):
                    create_data[name] = datetime.utcnow()

        # Application-level uniqueness check before insert.
        # NOTE(review): this check-then-insert is not atomic — a concurrent
        # writer could still slip a duplicate in between; presumably the DB
        # unique index is the real guard. Confirm.
        for unique_field in unique_fields:
            conditions = {}
            for f in unique_field:
                conditions[f] = data.get(f)

            vos = cls.filter(**conditions)
            if vos.count() > 0:
                raise ERROR_SAVE_UNIQUE_VALUES(keys=unique_field)

        try:
            new_vo = cls(**create_data).save()
        except Exception as e:
            raise ERROR_DB_QUERY(reason=e)

        return new_vo

    def update(self, data):
        """Update this document with `data`, restricted to meta
        'updatable_fields' (default: all fields except the id field), refreshing
        auto_now timestamps and re-checking unique constraints against other
        documents. Returns self (reloaded)."""
        unique_fields = []
        updatable_fields = self._meta.get(
            'updatable_fields',
            list(
                filter(
                    lambda x: x != self._meta.get('id_field', 'id'),
                    self._fields.keys()
                )
            )
        )

        for name, field in self._fields.items():
            if field.unique:
                if isinstance(field.unique_with, str):
                    unique_fields.append([field.name, field.unique_with])
                elif isinstance(field.unique_with, list):
                    unique_fields.append([field.name] + field.unique_with)
                else:
                    unique_fields.append([field.name])

            if getattr(field, 'auto_now', False):
                if name not in data.keys():
                    data[name] = datetime.utcnow()

        for unique_field in unique_fields:
            # Exclude this document itself from the duplicate scan.
            conditions = {'pk__ne': self.pk}
            for f in unique_field:
                conditions[f] = data.get(f)

            vos = self.filter(**conditions)
            if vos.count() > 0:
                raise ERROR_SAVE_UNIQUE_VALUES(keys=unique_field)

        # Silently drop keys callers are not allowed to update.
        for key in list(data.keys()):
            if key not in updatable_fields:
                del data[key]

        if data != {}:
            try:
                super().update(**data)
                self.reload()
            except Exception as e:
                raise ERROR_DB_QUERY(reason=e)

        return self

    def delete(self):
        """Delete this document; translates a reference-protection refusal
        into ERROR_EXIST_RESOURCE, anything else into ERROR_DB_QUERY."""
        try:
            super().delete()
        except OperationError as e:
            # If the message matches the reference pattern this raises
            # ERROR_EXIST_RESOURCE; otherwise fall through to ERROR_DB_QUERY.
            _raise_reference_error(self.__class__.__name__, str(e))
            raise ERROR_DB_QUERY(reason=e)
        except Exception as e:
            raise ERROR_DB_QUERY(reason=e)

    def terminate(self):
        # Raw delete, bypassing the error translation above.
        super().delete()

    def increment(self, key, amount=1):
        key = key.replace('.', '__')
        inc_data = {
            f'inc__{key}': amount
        }

        super().update(**inc_data)
        self.reload()
        return self

    def decrement(self, key, amount=1):
        key = key.replace('.', '__')
        dec_data = {
            f'dec__{key}': amount
        }

        super().update(**dec_data)
        self.reload()
        return self

    def set_data(self, key, data):
        key = key.replace('.', '__')
        set_data = {
            f'set__{key}': data
        }

        super().update(**set_data)
        self.reload()
        return self

    def unset_data(self, *keys):
        unset_data = {}

        for key in keys:
            key = key.replace('.', '__')
            unset_data[f'unset__{key}'] = 1

        super().update(**unset_data)
        self.reload()
        return self

    def append(self, key, data):
        """$push `data` onto the list field `key`; dicts destined for an
        EmbeddedDocument list are wrapped in the embedded document type."""
        key = key.replace('.', '__')
        append_data = {}

        field = getattr(self._fields.get(key, {}), 'field', None)

        if field and isinstance(field, EmbeddedDocumentField):
            reference_model = field.document_type_obj
            append_data[f'push__{key}'] = reference_model(**data)
        else:
            append_data[f'push__{key}'] = data

        super().update(**append_data)
        self.reload()
        return self

    def remove(self, key, data):
        key = key.replace('.', '__')
        remove_data = {
            f'pull__{key}': data
        }

        super().update(**remove_data)
        self.reload()
        return self

    @classmethod
    def get(cls, only=None, **conditions):
        """Return the single document matching `conditions` (optionally
        projected to `only` fields); raise ERROR_NOT_FOUND if none match."""
        vos = cls.filter(**conditions)

        if vos.count() == 0:
            keys = tuple(conditions.keys())
            values = tuple(conditions.values())

            if len(keys) == 1:
                raise ERROR_NOT_FOUND(key=keys[0], value=values[0])
            else:
                raise ERROR_NOT_FOUND(key=keys, value=values)

        if only:
            only = cls._remove_duplicate_only_keys(only)
            vos = vos.only(*only)

        return vos.first()

    @classmethod
    def filter(cls, **conditions):
        """Filter with equality conditions; a list value becomes an `$in`."""
        change_conditions = {}
        for key, value in conditions.items():
            if isinstance(value, list):
                change_conditions[f'{key}__in'] = value
            else:
                change_conditions[key] = value

        return cls.objects.filter(**change_conditions)

    def to_dict(self):
        # NOTE(review): returns the raw BSON SON mapping, not a plain dict.
        return self.to_mongo()

    @staticmethod
    def _check_operator_value(is_multiple, operator, value, condition):
        # Multi-value operators (e.g. `in`) require a list; single-value
        # operators must not receive one.
        if is_multiple:
            if not isinstance(value, list):
                raise ERROR_OPERATOR_LIST_VALUE_TYPE(operator=operator, condition=condition)
        else:
            if isinstance(value, list):
                raise ERROR_OPERATOR_VALUE_TYPE(operator=operator, condition=condition)

    @classmethod
    def _check_reference_field(cls, key):
        # True if `key` is NOT itself a declared reference root (meaning a
        # reference rewrite should be attempted for dotted sub-keys).
        ref_keys = cls._meta.get('reference_query_keys', {}).keys()
        if key in ref_keys:
            return False
        else:
            return True

    @classmethod
    def _get_reference_model(cls, key):
        """Resolve `key` (e.g. 'project.name') against meta
        'reference_query_keys'; returns (model, ref_key, sub_key, foreign_key)
        or all-None when the key is not a reference path."""
        for ref_key, ref_conf in cls._meta.get('reference_query_keys', {}).items():
            if key.startswith(ref_key) and key[len(ref_key)] == '.':
                if isinstance(ref_conf, dict):
                    ref_model = ref_conf.get('model')
                    foreign_key = ref_conf.get('foreign_key')
                else:
                    ref_model = ref_conf
                    foreign_key = None

                ref_query_key = key.replace(f'{ref_key}.', '')
                if ref_model == 'self':
                    ref_model = cls

                return ref_model, ref_key, ref_query_key, foreign_key

        return None, None, None, None

    @classmethod
    def _change_reference_condition(cls, key, value, operator):
        """Rewrite a condition on a referenced document's field into an `in`
        condition on this model's reference field, by first querying the
        referenced model."""
        ref_model, ref_key, ref_query_key, foreign_key = cls._get_reference_model(key)
        if ref_model:
            if value is None:
                return ref_key, value, operator
            else:
                ref_vos, total_count = ref_model.query(
                    filter=[{'k': ref_query_key, 'v': value, 'o': operator}])

                if foreign_key:
                    # Match on the foreign key values rather than documents.
                    ref_values = []
                    for ref_vo in ref_vos:
                        ref_value = getattr(ref_vo, foreign_key)
                        if ref_value:
                            ref_values.append(ref_value)
                else:
                    ref_values = list(ref_vos)

                return ref_key, ref_values, 'in'
        else:
            return key, value, operator

    @classmethod
    def _make_condition(cls, condition):
        """Translate one filter dict ({'k'/'key', 'v'/'value', 'o'/'operator'})
        into a mongoengine Q via the FILTER_OPERATORS table."""
        key = condition.get('key', condition.get('k'))
        value = condition.get('value', condition.get('v'))
        operator = condition.get('operator', condition.get('o'))
        change_query_keys = cls._meta.get('change_query_keys', {})

        if operator not in FILTER_OPERATORS:
            raise ERROR_DB_QUERY(reason=f'Filter operator is not supported. (operator = '
                                        f'{FILTER_OPERATORS.keys()})')

        resolver, mongo_operator, is_multiple = FILTER_OPERATORS.get(operator)

        cls._check_operator_value(is_multiple, operator, value, condition)

        if key and operator:
            # Apply key aliasing declared in meta.
            if key in change_query_keys:
                key = change_query_keys[key]

            # Regex operators skip the reference rewrite.
            if operator not in ['regex', 'regex_in']:
                if cls._check_reference_field(key):
                    key, value, operator = cls._change_reference_condition(key, value, operator)
                    # Operator may have changed (e.g. to 'in'); re-resolve.
                    resolver, mongo_operator, is_multiple = FILTER_OPERATORS[operator]

                key = key.replace('.', '__')

            return resolver(key, value, mongo_operator, is_multiple)
        else:
            raise ERROR_DB_QUERY(reason='Filter condition should have key, value and operator.')

    @classmethod
    def _make_filter(cls, filter, filter_or):
        """Combine AND conditions and OR conditions into one Q expression
        (AND-group ANDed with the OR-group when both are present)."""
        _filter = None
        _filter_or = None

        if len(filter) > 0:
            _filter = reduce(lambda x, y: x & y, map(cls._make_condition, filter))

        if len(filter_or) > 0:
            _filter_or = reduce(lambda x, y: x | y, map(cls._make_condition, filter_or))

        if _filter and _filter_or:
            _filter = _filter & _filter_or
        else:
            _filter = _filter or _filter_or

        return _filter

    @classmethod
    def _remove_duplicate_only_keys(cls, only):
        """Collapse a projection list so a parent key ('a') subsumes its
        dotted children ('a.b'), whichever order they appear in."""
        changed_only = []
        duplicated_only = []

        for key in only:
            exists = False
            for changed_key in changed_only:
                if key == changed_key or key.startswith(f'{changed_key}.'):
                    exists = True
                elif changed_key.startswith(f'{key}.'):
                    duplicated_only.append(changed_key)

            if exists is False:
                changed_only.append(key)

        if len(duplicated_only) > 0:
            changed_only = list(set(changed_only) - set(duplicated_only))

        return changed_only

    @classmethod
    def query(cls, *args, only=None, exclude=None, all_fields=False, filter=None, filter_or=None,
              sort=None, page=None, minimal=False, count_only=False, **kwargs):
        """Run a dict-driven query.

        Returns (queryset_or_list, total_count). `page` uses 1-based 'start'
        plus 'limit'; `count_only` returns an empty result list with the count.
        """
        if filter is None:
            filter = []

        if filter_or is None:
            filter_or = []

        if sort is None:
            sort = {}

        if page is None:
            page = {}

        _order_by = None
        minimal_fields = cls._meta.get('minimal_fields')
        _filter = cls._make_filter(filter, filter_or)

        if 'key' in sort:
            if sort.get('desc', False):
                _order_by = f"-{sort['key']}"
            else:
                _order_by = f"{sort['key']}"

        try:
            if cls.case_insensitive_index:
                vos = cls.objects.filter(_filter).collation({'locale': 'en', 'strength': 2})
            else:
                vos = cls.objects.filter(_filter)

            if _order_by:
                vos = vos.order_by(_order_by)

            if only:
                # The sort key (or meta 'ordering' keys) must be projected too.
                if 'key' in sort:
                    if sort['key'] not in only:
                        only.append(sort['key'])
                else:
                    ordering = cls._meta.get('ordering')
                    for key in ordering:
                        if key.startswith('+') or key.startswith('-'):
                            key = key[1:]
                        if key not in only:
                            only.append(key)

                only = cls._remove_duplicate_only_keys(only)
                vos = vos.only(*only)

            if exclude:
                vos = vos.exclude(*exclude)

            if minimal and minimal_fields:
                vos = vos.only(*minimal_fields)

            if all_fields:
                vos = vos.all_fields()

            total_count = vos.count()

            if count_only:
                vos = []
            else:
                if 'limit' in page and page['limit'] > 0:
                    start = page.get('start', 1)
                    if start < 1:
                        start = 1
                    # 1-based start, inclusive slice window of `limit` items.
                    vos = vos[start - 1:start + page['limit'] - 1]

            return vos, total_count
        except Exception as e:
            raise ERROR_DB_QUERY(reason=e)

    @classmethod
    def _check_well_known_type(cls, value):
        """Coerce BSON/mongoengine values into JSON-friendly primitives."""
        if isinstance(value, datetime):
            # NOTE(review): appends 'Z' without confirming the datetime is UTC.
            return f'{value.isoformat()}Z'
        elif isinstance(value, bson.objectid.ObjectId):
            return str(value)
        elif isinstance(value, Document):
            return str(value.id)
        elif isinstance(value, EmbeddedDocument):
            return dict(value.to_mongo())
        else:
            return value

    @classmethod
    def _make_aggregate_values(cls, cursor):
        """Flatten aggregation rows: group keys inside '_id' are promoted to
        top-level fields; all values are coerced to well-known types."""
        values = []
        for row in cursor:
            data = {}
            for key, value in row.items():
                if key == '_id' and isinstance(row[key], dict):
                    for group_key, group_value in row[key].items():
                        data[group_key] = cls._check_well_known_type(group_value)
                else:
                    data[key] = cls._check_well_known_type(value)

            values.append(data)

        return values

    @classmethod
    def _make_distinct_values(cls, values):
        changed_values = []
        for value in values:
            changed_values.append(cls._check_well_known_type(value))

        return changed_values

    @classmethod
    def _get_group_fields(cls, condition, _before_group_keys):
        """Validate one 'aggregate.group.fields' condition and return
        (key, name, operator, value, date_format); keys produced by an earlier
        group stage are re-addressed under '_id.'."""
        key = condition.get('key', condition.get('k'))
        name = condition.get('name', condition.get('n'))
        operator = condition.get('operator', condition.get('o'))
        value = condition.get('value', condition.get('v'))
        date_format = condition.get('date_format')

        if operator not in STAT_OPERATORS:
            raise ERROR_DB_QUERY(reason=f"'aggregate.group.fields' operator is not supported. "
                                        f"(operator = {STAT_OPERATORS.keys()})")

        if operator not in ['count', 'date'] and key is None:
            raise ERROR_DB_QUERY(reason=f"'aggregate.group.fields' condition requires a key: {condition}")

        if name is None:
            raise ERROR_DB_QUERY(reason=f"'aggregate.group.fields' condition requires a name: {condition}")

        if operator == 'date' and value is None:
            raise ERROR_DB_QUERY(reason=f"'aggregate.group.fields' condition requires a value: {condition}")

        if key in _before_group_keys:
            key = f'_id.{key}'

        return key, name, operator, value, date_format

    @classmethod
    def _get_group_keys(cls, condition, _before_group_keys):
        """Validate one 'aggregate.group.keys' condition and return
        (key, name, rule) where rule is a '$field' path or a $dateToString
        expression when date_format is given."""
        key = condition.get('key', condition.get('k'))
        name = condition.get('name', condition.get('n'))
        date_format = condition.get('date_format')

        if key is None:
            raise ERROR_DB_QUERY(reason=f"'aggregate.group.keys' condition requires a key: {condition}")

        if name is None:
            raise ERROR_DB_QUERY(reason=f"'aggregate.group.keys' condition requires a name: {condition}")

        if key in _before_group_keys:
            key = f'_id.{key}'

        if date_format:
            rule = {
                '$dateToString': {
                    'format': date_format,
                    'date': f'${key}'
                }
            }
        else:
            rule = f'${key}'

        return key, name, rule

    @classmethod
    def _make_group_rule(cls, options, _before_group_keys):
        """Build the pipeline stages for one 'group' option: a $group stage
        plus up to two follow-up $project stages demanded by the stat
        operators. Returns (stages, group_key_names)."""
        _group_keys = []
        _include_project = False
        _include_second_project = False
        _project_fields = {}
        _second_project_fields = {}
        _project_rules = []
        _rules = []
        _group_rule = {
            '$group': {
                '_id': {}
            }
        }
        _keys = options.get('keys', [])
        _fields = options.get('fields', [])

        # if len(_keys) == 0:
        #     raise ERROR_REQUIRED_PARAMETER(key='aggregate.group.keys')

        for condition in _keys:
            key, name, rule = cls._get_group_keys(condition, _before_group_keys)
            _group_keys.append(name)
            _group_rule['$group']['_id'][name] = rule

        for condition in _fields:
            key, name, operator, value, date_format = cls._get_group_fields(condition, _before_group_keys)
            rule = STAT_OPERATORS[operator](key, operator, name, value, date_format)

            if rule.get('group') is not None:
                _group_rule['$group'].update(rule['group'])

            if rule.get('project') is not None:
                _include_project = True
                _project_fields.update(rule['project'])
            else:
                # Pass the field through unchanged if a $project stage runs.
                _project_fields[name] = 1

            if rule.get('second_project') is not None:
                _include_second_project = True
                _second_project_fields.update(rule['second_project'])
            else:
                _second_project_fields[name] = 1

        _rules.append(_group_rule)

        if _include_project:
            _rules.append({
                '$project': _project_fields
            })

        if _include_second_project:
            _rules.append({
                '$project': _second_project_fields
            })

        return _rules, _group_keys

    @classmethod
    def _make_unwind_rule(cls, options):
        if 'path' not in options:
            raise ERROR_REQUIRED_PARAMETER(key='aggregate.unwind.path')

        return {
            '$unwind': f"${options['path']}"
        }

    @classmethod
    def _make_count_rule(cls, options):
        if 'name' not in options:
            raise ERROR_REQUIRED_PARAMETER(key='aggregate.count.name')

        return {
            '$count': options['name']
        }

    @classmethod
    def _make_sort_rule(cls, options, _group_keys):
        if 'key' not in options:
            raise ERROR_REQUIRED_PARAMETER(key='aggregate.sort.key')

        # Keys produced by a group stage live under '_id.' after $group.
        if options['key'] in _group_keys:
            sort_name = f"_id.{options['key']}"
        else:
            sort_name = options['key']

        if options.get('desc', False):
            return {
                '$sort': {sort_name: -1}
            }
        else:
            return {
                '$sort': {sort_name: 1}
            }

    @classmethod
    def _make_aggregate_rules(cls, aggregate):
        """Translate the ordered list of stage dicts into MongoDB pipeline
        stages, tracking group-key names so later stages can address them."""
        _aggregate_rules = []
        _group_keys = []

        if not isinstance(aggregate, list):
            raise ERROR_INVALID_PARAMETER_TYPE(key='aggregate', type='list')

        for stage in aggregate:
            if 'unwind' in stage:
                rule = cls._make_unwind_rule(stage['unwind'])
                _aggregate_rules.append(rule)
            elif 'group' in stage:
                rules, group_keys = cls._make_group_rule(stage['group'], _group_keys)
                _aggregate_rules += rules
                _group_keys += group_keys
            elif 'count' in stage:
                rule = cls._make_count_rule(stage['count'])
                _aggregate_rules.append(rule)
            elif 'sort' in stage:
                rule = cls._make_sort_rule(stage['sort'], _group_keys)
                _aggregate_rules.append(rule)
            else:
                raise ERROR_REQUIRED_PARAMETER(key='aggregate.unwind or aggregate.group or '
                                                   'aggregate.count or aggregate.sort')

        return _aggregate_rules, _group_keys

    @classmethod
    def _stat_aggregate(cls, vos, aggregate, page):
        """Run the aggregation pipeline with optional paging; when paging, a
        separate $count pass fills 'total_count'."""
        result = {}
        pipeline = []
        _aggregate_rules, _group_keys = cls._make_aggregate_rules(aggregate)

        for rule in _aggregate_rules:
            pipeline.append(rule)

        if 'limit' in page and page['limit'] > 0:
            limit = page['limit']
            start = page.get('start', 1)
            start = 1 if start < 1 else start

            result['total_count'] = 0
            cursor = vos.aggregate(pipeline + [{'$count': 'total_count'}])
            for c in cursor:
                result['total_count'] = c['total_count']
                break

            if start > 1:
                pipeline.append({
                    '$skip': start - 1
                })

            pipeline.append({
                '$limit': limit
            })

        cursor = vos.aggregate(pipeline)
        result['results'] = cls._make_aggregate_values(cursor)
        return result

    @classmethod
    def _stat_distinct(cls, vos, distinct, page):
        """Return distinct values of one field, best-effort sorted, paged."""
        result = {}
        values = vos.distinct(distinct)

        try:
            values.sort()
        except Exception:
            # Mixed/unorderable types: leave in database order.
            pass

        if 'limit' in page and page['limit'] > 0:
            start = page.get('start', 1)
            if start < 1:
                start = 1

            result['total_count'] = len(values)
            values = values[start - 1:start + page['limit'] - 1]

        result['results'] = cls._make_distinct_values(values)
        return result

    @classmethod
    def stat(cls, *args, aggregate=None, distinct=None, filter=None, filter_or=None,
             page=None, **kwargs):
        """Statistics entry point: filter first, then either run the
        'aggregate' pipeline or a 'distinct' projection (aggregate wins when
        both are given)."""
        if filter is None:
            filter = []

        if filter_or is None:
            filter_or = []

        if page is None:
            page = {}

        if not (aggregate or distinct):
            raise ERROR_REQUIRED_PARAMETER(key='aggregate')

        _filter = cls._make_filter(filter, filter_or)

        try:
            if cls.case_insensitive_index:
                vos = cls.objects.filter(_filter).collation({'locale': 'en', 'strength': 2})
            else:
                vos = cls.objects.filter(_filter)

            if aggregate:
                return cls._stat_aggregate(vos, aggregate, page)

            elif distinct:
                return cls._stat_distinct(vos, distinct, page)
        except Exception as e:
            raise ERROR_DB_QUERY(reason=e)
import bson import re import logging from datetime import datetime from functools import reduce from mongoengine import EmbeddedDocumentField, EmbeddedDocument, Document, QuerySet, register_connection from pymongo import ReadPreference from mongoengine.errors import * from spaceone.core import config from spaceone.core import utils from spaceone.core.error import * from spaceone.core.model import BaseModel from spaceone.core.model.mongo_model.filter_operator import FILTER_OPERATORS from spaceone.core.model.mongo_model.stat_operator import STAT_OPERATORS _REFERENCE_ERROR_FORMAT = r'Could not delete document \((\w+)\.\w+ refers to it\)' _MONGO_CONNECTIONS = [] _MONGO_INIT_MODELS = [] _LOGGER = logging.getLogger(__name__) def _raise_reference_error(class_name, message): m = re.findall(_REFERENCE_ERROR_FORMAT, message) if len(m) > 0: raise ERROR_EXIST_RESOURCE(parent=class_name, child=m[0]) class MongoCustomQuerySet(QuerySet): def last(self): return self.order_by('-id').first() def update(self, *args, **kwargs): if len(args) > 0 and isinstance(args[0], dict): kwargs.update(args[0]) super().update(**kwargs) def increment(self, key, amount=1): key = key.replace('.', '__') inc_data = { f'inc__{key}': amount } super().update(**inc_data) def decrement(self, key, amount=1): key = key.replace('.', '__') dec_data = { f'dec__{key}': amount } super().update(**dec_data) def set_data(self, key, data): key = key.replace('.', '__') set_data = { f'set__{key}': data } super().update(**set_data) def unset_data(self, *keys): unset_data = {} for key in keys: key = key.replace('.', '__') unset_data[f'unset__{key}'] = 1 super().update(**unset_data) def append(self, key, data): key = key.replace('.', '__') append_data = { f'push__{key}': data } super().update(**append_data) def remove(self, key, data): key = key.replace('.', '__') remove_data = { f'pull__{key}': data } super().update(**remove_data) class MongoModel(Document, BaseModel): auto_create_index = True case_insensitive_index = 
False meta = { 'abstract': True, 'queryset_class': MongoCustomQuerySet, 'auto_create_index': False } @classmethod def init(cls): global_conf = config.get_global() if global_conf.get('MOCK_MODE', False) == False: cls.connect() if cls not in _MONGO_INIT_MODELS: cls.auto_create_index = global_conf.get('DATABASE_AUTO_CREATE_INDEX', True) cls.case_insensitive_index = global_conf.get('DATABASE_CASE_INSENSITIVE_INDEX', False) cls._create_index() _MONGO_INIT_MODELS.append(cls) @classmethod def connect(cls): db_alias = cls._meta.get('db_alias', 'default') if db_alias not in _MONGO_CONNECTIONS: global_conf = config.get_global() databases = global_conf.get('DATABASES', {}) if db_alias not in databases: raise ERROR_DB_CONFIGURATION(backend=db_alias) db_conf = databases[db_alias].copy() if 'read_preference' in db_conf: read_preference = getattr(ReadPreference, db_conf['read_preference'], None) if read_preference: db_conf['read_preference'] = read_preference else: del db_conf['read_preference'] register_connection(db_alias, **db_conf) _MONGO_CONNECTIONS.append(db_alias) @classmethod def _create_index(cls): if cls.auto_create_index: indexes = cls._meta.get('indexes', []) if len(indexes) > 0: _LOGGER.debug(f'Create MongoDB Indexes ({cls.__name__} Model: {len(indexes)} Indexes)') for index in indexes: try: if cls.case_insensitive_index: cls.create_index(index, collation={"locale": "en", "strength": 2}) else: cls.create_index(index) except Exception as e: _LOGGER.error(f'Index Creation Failure: {e}') @classmethod def create(cls, data): create_data = {} unique_fields = [] for name, field in cls._fields.items(): if field.unique: if isinstance(field.unique_with, str): unique_fields.append([field.name, field.unique_with]) elif isinstance(field.unique_with, list): unique_fields.append([field.name] + field.unique_with) else: unique_fields.append([field.name]) if name in data: create_data[name] = data[name] else: generate_id = getattr(field, 'generate_id', None) if generate_id: 
create_data[name] = utils.generate_id(generate_id) if getattr(field, 'auto_now', False): create_data[name] = datetime.utcnow() elif getattr(field, 'auto_now_add', False): create_data[name] = datetime.utcnow() for unique_field in unique_fields: conditions = {} for f in unique_field: conditions[f] = data.get(f) vos = cls.filter(**conditions) if vos.count() > 0: raise ERROR_SAVE_UNIQUE_VALUES(keys=unique_field) try: new_vo = cls(**create_data).save() except Exception as e: raise ERROR_DB_QUERY(reason=e) return new_vo def update(self, data): unique_fields = [] updatable_fields = self._meta.get( 'updatable_fields', list( filter( lambda x: x != self._meta.get('id_field', 'id'), self._fields.keys() ) ) ) for name, field in self._fields.items(): if field.unique: if isinstance(field.unique_with, str): unique_fields.append([field.name, field.unique_with]) elif isinstance(field.unique_with, list): unique_fields.append([field.name] + field.unique_with) else: unique_fields.append([field.name]) if getattr(field, 'auto_now', False): if name not in data.keys(): data[name] = datetime.utcnow() for unique_field in unique_fields: conditions = {'pk__ne': self.pk} for f in unique_field: conditions[f] = data.get(f) vos = self.filter(**conditions) if vos.count() > 0: raise ERROR_SAVE_UNIQUE_VALUES(keys=unique_field) for key in list(data.keys()): if key not in updatable_fields: del data[key] if data != {}: try: super().update(**data) self.reload() except Exception as e: raise ERROR_DB_QUERY(reason=e) return self def delete(self): try: super().delete() except OperationError as e: _raise_reference_error(self.__class__.__name__, str(e)) raise ERROR_DB_QUERY(reason=e) except Exception as e: raise ERROR_DB_QUERY(reason=e) def terminate(self): super().delete() def increment(self, key, amount=1): key = key.replace('.', '__') inc_data = { f'inc__{key}': amount } super().update(**inc_data) self.reload() return self def decrement(self, key, amount=1): key = key.replace('.', '__') dec_data = { 
        # NOTE(review): tail of a decrement-style method whose header lies above
        # this chunk — presumably builds {'dec__<key>': amount} for an atomic
        # MongoDB decrement; confirm against the full file.
            f'dec__{key}': amount
        }
        super().update(**dec_data)
        self.reload()
        return self

    def set_data(self, key, data):
        """Atomically set the (possibly dotted) field ``key`` to ``data`` and reload."""
        key = key.replace('.', '__')  # mongoengine spells nested paths with '__'
        set_data = {
            f'set__{key}': data
        }
        super().update(**set_data)
        self.reload()
        return self

    def unset_data(self, *keys):
        """Atomically unset one or more (possibly dotted) fields and reload."""
        unset_data = {}
        for key in keys:
            key = key.replace('.', '__')
            unset_data[f'unset__{key}'] = 1
        super().update(**unset_data)
        self.reload()
        return self

    def append(self, key, data):
        """Push ``data`` onto the list field ``key`` and reload.

        If the list holds embedded documents, the dict ``data`` is wrapped in
        the embedded document class before the push.
        """
        key = key.replace('.', '__')
        append_data = {}
        field = getattr(self._fields.get(key, {}), 'field', None)
        if field and isinstance(field, EmbeddedDocumentField):
            reference_model = field.document_type_obj
            append_data[f'push__{key}'] = reference_model(**data)
        else:
            append_data[f'push__{key}'] = data
        super().update(**append_data)
        self.reload()
        return self

    def remove(self, key, data):
        """Pull ``data`` out of the list field ``key`` and reload."""
        key = key.replace('.', '__')
        remove_data = {
            f'pull__{key}': data
        }
        super().update(**remove_data)
        self.reload()
        return self

    @classmethod
    def get(cls, only=None, **conditions):
        """Return the first document matching ``conditions``.

        Raises:
            ERROR_NOT_FOUND: when no document matches (single-key conditions
                get a scalar key/value in the error; multi-key get tuples).
        """
        vos = cls.filter(**conditions)
        if vos.count() == 0:
            keys = tuple(conditions.keys())
            values = tuple(conditions.values())
            if len(keys) == 1:
                raise ERROR_NOT_FOUND(key=keys[0], value=values[0])
            else:
                raise ERROR_NOT_FOUND(key=keys, value=values)
        if only:
            only = cls._remove_duplicate_only_keys(only)
            vos = vos.only(*only)
        return vos.first()

    @classmethod
    def filter(cls, **conditions):
        """Build a queryset; list-valued conditions become ``__in`` membership tests."""
        change_conditions = {}
        for key, value in conditions.items():
            if isinstance(value, list):
                change_conditions[f'{key}__in'] = value
            else:
                change_conditions[key] = value
        return cls.objects.filter(**change_conditions)

    def to_dict(self):
        """Return the raw MongoDB (SON) representation of this document."""
        return self.to_mongo()

    @staticmethod
    def _check_operator_value(is_multiple, operator, value, condition):
        # Multi-value operators require a list value; scalar operators must not
        # receive one.
        if is_multiple:
            if not isinstance(value, list):
                raise ERROR_OPERATOR_LIST_VALUE_TYPE(operator=operator, condition=condition)
        else:
            if isinstance(value, list):
                raise ERROR_OPERATOR_VALUE_TYPE(operator=operator, condition=condition)

    @classmethod
    def _check_reference_field(cls, key):
        # True when ``key`` is NOT itself one of the declared reference keys
        # (and may therefore still be a dotted path into a reference).
        ref_keys = cls._meta.get('reference_query_keys', {}).keys()
        if key in ref_keys:
            return False
        else:
            return True

    @classmethod
    def _get_reference_model(cls, key):
        """Resolve a dotted key ('ref.field') against _meta['reference_query_keys'].

        Returns (ref_model, ref_key, query_key_inside_ref, foreign_key), or
        four Nones when ``key`` does not address a configured reference. A
        config value of 'self' refers back to this model class.
        """
        for ref_key, ref_conf in cls._meta.get('reference_query_keys', {}).items():
            if key.startswith(ref_key) and key[len(ref_key)] == '.':
                if isinstance(ref_conf, dict):
                    ref_model = ref_conf.get('model')
                    foreign_key = ref_conf.get('foreign_key')
                else:
                    ref_model = ref_conf
                    foreign_key = None
                ref_query_key = key.replace(f'{ref_key}.', '')
                if ref_model == 'self':
                    ref_model = cls
                return ref_model, ref_key, ref_query_key, foreign_key
        return None, None, None, None

    @classmethod
    def _change_reference_condition(cls, key, value, operator):
        # Rewrite a condition on a referenced document's field into an 'in'
        # condition over the matching referenced objects (or, when a
        # foreign_key is configured, over their foreign-key values).
        ref_model, ref_key, ref_query_key, foreign_key = cls._get_reference_model(key)
        if ref_model:
            if value is None:
                return ref_key, value, operator
            else:
                ref_vos, total_count = ref_model.query(
                    filter=[{'k': ref_query_key, 'v': value, 'o': operator}])
                if foreign_key:
                    ref_values = []
                    for ref_vo in ref_vos:
                        ref_value = getattr(ref_vo, foreign_key)
                        if ref_value:
                            ref_values.append(ref_value)
                else:
                    ref_values = list(ref_vos)
                return ref_key, ref_values, 'in'
        else:
            return key, value, operator

    @classmethod
    def _make_condition(cls, condition):
        """Translate one filter dict ({'k','v','o'} or long names) into a Q object."""
        key = condition.get('key', condition.get('k'))
        value = condition.get('value', condition.get('v'))
        operator = condition.get('operator', condition.get('o'))
        change_query_keys = cls._meta.get('change_query_keys', {})
        if operator not in FILTER_OPERATORS:
            raise ERROR_DB_QUERY(reason=f'Filter operator is not supported. (operator = '
                                        f'{FILTER_OPERATORS.keys()})')
        resolver, mongo_operator, is_multiple = FILTER_OPERATORS.get(operator)
        cls._check_operator_value(is_multiple, operator, value, condition)
        if key and operator:
            if key in change_query_keys:
                key = change_query_keys[key]
            # regex operators bypass reference resolution
            if operator not in ['regex', 'regex_in']:
                if cls._check_reference_field(key):
                    key, value, operator = cls._change_reference_condition(key, value, operator)
                    # operator may have changed to 'in'; refresh the resolver
                    resolver, mongo_operator, is_multiple = FILTER_OPERATORS[operator]
            key = key.replace('.', '__')
            return resolver(key, value, mongo_operator, is_multiple)
        else:
            raise ERROR_DB_QUERY(reason='Filter condition should have key, value and operator.')

    @classmethod
    def _make_filter(cls, filter, filter_or):
        # AND-fold `filter`, OR-fold `filter_or`, then AND the two results.
        _filter = None
        _filter_or = None
        if len(filter) > 0:
            _filter = reduce(lambda x, y: x & y, map(cls._make_condition, filter))
        if len(filter_or) > 0:
            _filter_or = reduce(lambda x, y: x | y, map(cls._make_condition, filter_or))
        if _filter and _filter_or:
            _filter = _filter & _filter_or
        else:
            _filter = _filter or _filter_or
        return _filter

    @classmethod
    def _remove_duplicate_only_keys(cls, only):
        # Drop 'a.b' style entries already covered by a broader entry 'a'
        # (in either order of appearance).
        changed_only = []
        duplicated_only = []
        for key in only:
            exists = False
            for changed_key in changed_only:
                if key == changed_key or key.startswith(f'{changed_key}.'):
                    exists = True
                elif changed_key.startswith(f'{key}.'):
                    duplicated_only.append(changed_key)
            if exists is False:
                changed_only.append(key)
        if len(duplicated_only) > 0:
            changed_only = list(set(changed_only) - set(duplicated_only))
        return changed_only

    @classmethod
    def query(cls, *args, only=None, exclude=None, all_fields=False, filter=None, filter_or=None,
              sort=None, page=None, minimal=False, count_only=False, **kwargs):
        """Run a filtered/sorted/paginated query.

        Returns:
            (queryset_or_empty_list, total_count); ``count_only`` returns an
            empty result list with only the count populated.

        Raises:
            ERROR_DB_QUERY: any underlying failure is wrapped.
        """
        if filter is None:
            filter = []
        if filter_or is None:
            filter_or = []
        if sort is None:
            sort = {}
        if page is None:
            page = {}
        _order_by = None
        minimal_fields = cls._meta.get('minimal_fields')
        _filter = cls._make_filter(filter, filter_or)
        if 'key' in sort:
            if sort.get('desc', False):
                _order_by = f'-{sort["key"]}'
            else:
                _order_by = f'{sort["key"]}'
        try:
            if cls.case_insensitive_index:
                # strength 2 collation => case-insensitive comparisons
                vos = cls.objects.filter(_filter).collation({'locale': 'en', 'strength': 2})
            else:
                vos = cls.objects.filter(_filter)
            if _order_by:
                vos = vos.order_by(_order_by)
            if only:
                # The sort key (or default ordering keys) must stay projected,
                # otherwise ordering on an unprojected field misbehaves.
                if 'key' in sort:
                    if sort['key'] not in only:
                        only.append(sort['key'])
                else:
                    ordering = cls._meta.get('ordering')
                    for key in ordering:
                        if key.startswith('+') or key.startswith('-'):
                            key = key[1:]
                        if key not in only:
                            only.append(key)
                only = cls._remove_duplicate_only_keys(only)
                vos = vos.only(*only)
            if exclude:
                vos = vos.exclude(*exclude)
            if minimal and minimal_fields:
                vos = vos.only(*minimal_fields)
            if all_fields:
                vos = vos.all_fields()
            total_count = vos.count()
            if count_only:
                vos = []
            else:
                # 1-based pagination; slice is [start-1, start-1+limit)
                if 'limit' in page and page['limit'] > 0:
                    start = page.get('start', 1)
                    if start < 1:
                        start = 1
                    vos = vos[start - 1:start + page['limit'] - 1]
            return vos, total_count
        except Exception as e:
            raise ERROR_DB_QUERY(reason=e)

    @classmethod
    def _check_well_known_type(cls, value):
        # Normalize Mongo/mongoengine types into JSON-friendly values.
        if isinstance(value, datetime):
            return f'{value.isoformat()}Z'
        elif isinstance(value, bson.objectid.ObjectId):
            return str(value)
        elif isinstance(value, Document):
            return str(value.id)
        elif isinstance(value, EmbeddedDocument):
            return dict(value.to_mongo())
        else:
            return value

    @classmethod
    def _make_aggregate_values(cls, cursor):
        """Flatten aggregation rows: group keys inside '_id' are lifted to the top level."""
        values = []
        for row in cursor:
            data = {}
            for key, value in row.items():
                if key == '_id' and isinstance(row[key], dict):
                    for group_key, group_value in row[key].items():
                        data[group_key] = cls._check_well_known_type(group_value)
                else:
                    data[key] = cls._check_well_known_type(value)
            values.append(data)
        return values

    @classmethod
    def _make_distinct_values(cls, values):
        # Normalize every distinct value through the well-known-type converter.
        changed_values = []
        for value in values:
            changed_values.append(cls._check_well_known_type(value))
        return changed_values

    @classmethod
    def _get_group_fields(cls, condition, _before_group_keys):
        """Validate one 'aggregate.group.fields' condition and return its parts.

        Keys produced by an earlier group stage are re-addressed as '_id.<key>'.
        """
        key = condition.get('key', condition.get('k'))
        name = condition.get('name', condition.get('n'))
        operator = condition.get('operator', condition.get('o'))
        value = condition.get('value', condition.get('v'))
        date_format = condition.get('date_format')
        if operator not in STAT_OPERATORS:
            raise ERROR_DB_QUERY(reason=f"'aggregate.group.fields' operator is not supported. "
                                        f"(operator = {STAT_OPERATORS.keys()})")
        if operator not in ['count', 'date'] and key is None:
            raise ERROR_DB_QUERY(reason=f"'aggregate.group.fields' condition requires a key: {condition}")
        if name is None:
            raise ERROR_DB_QUERY(reason=f"'aggregate.group.fields' condition requires a name: {condition}")
        if operator == 'date' and value is None:
            raise ERROR_DB_QUERY(reason=f"'aggregate.group.fields' condition requires a value: {condition}")
        if key in _before_group_keys:
            key = f'_id.{key}'
        return key, name, operator, value, date_format

    @classmethod
    def _get_group_keys(cls, condition, _before_group_keys):
        """Validate one 'aggregate.group.keys' condition; return (key, name, $group rule)."""
        key = condition.get('key', condition.get('k'))
        name = condition.get('name', condition.get('n'))
        date_format = condition.get('date_format')
        if key is None:
            raise ERROR_DB_QUERY(reason=f"'aggregate.group.keys' condition requires a key: {condition}")
        if name is None:
            raise ERROR_DB_QUERY(reason=f"'aggregate.group.keys' condition requires a name: {condition}")
        if key in _before_group_keys:
            key = f'_id.{key}'
        if date_format:
            # Bucket by a formatted date string instead of the raw value.
            rule = {
                '$dateToString': {
                    'format': date_format,
                    'date': f'${key}'
                }
            }
        else:
            rule = f'${key}'
        return key, name, rule

    @classmethod
    def _make_group_rule(cls, options, _before_group_keys):
        """Build the $group stage (plus up to two trailing $project stages)."""
        _group_keys = []
        _include_project = False
        _include_second_project = False
        _project_fields = {}
        _second_project_fields = {}
        _project_rules = []
        _rules = []
        _group_rule = {
            '$group': {
                '_id': {}
            }
        }
        _keys = options.get('keys', [])
        _fields = options.get('fields', [])
        # if len(_keys) == 0:
        #     raise ERROR_REQUIRED_PARAMETER(key='aggregate.group.keys')
        for condition in _keys:
            key, name, rule = cls._get_group_keys(condition, _before_group_keys)
            _group_keys.append(name)
            _group_rule['$group']['_id'][name] = rule
        for condition in _fields:
            key, name, operator, value, date_format = cls._get_group_fields(condition, _before_group_keys)
            # STAT_OPERATORS entries return dicts with optional 'group',
            # 'project' and 'second_project' fragments.
            rule = STAT_OPERATORS[operator](key, operator, name, value, date_format)
            if rule.get('group') is not None:
                _group_rule['$group'].update(rule['group'])
            if rule.get('project') is not None:
                _include_project = True
                _project_fields.update(rule['project'])
            else:
                _project_fields[name] = 1
            if rule.get('second_project') is not None:
                _include_second_project = True
                _second_project_fields.update(rule['second_project'])
            else:
                _second_project_fields[name] = 1
        _rules.append(_group_rule)
        if _include_project:
            _rules.append({
                '$project': _project_fields
            })
        if _include_second_project:
            _rules.append({
                '$project': _second_project_fields
            })
        return _rules, _group_keys

    @classmethod
    def _make_unwind_rule(cls, options):
        """Build a $unwind stage from {'path': <field>}."""
        if 'path' not in options:
            raise ERROR_REQUIRED_PARAMETER(key='aggregate.unwind.path')
        return {
            '$unwind': f"${options['path']}"
        }

    @classmethod
    def _make_count_rule(cls, options):
        """Build a $count stage from {'name': <output field>}."""
        if 'name' not in options:
            raise ERROR_REQUIRED_PARAMETER(key='aggregate.count.name')
        return {
            '$count': options['name']
        }

    @classmethod
    def _make_sort_rule(cls, options, _group_keys):
        """Build a $sort stage; group keys sort on their '_id.<key>' alias."""
        if 'key' not in options:
            raise ERROR_REQUIRED_PARAMETER(key='aggregate.sort.key')
        if options['key'] in _group_keys:
            sort_name = f'_id.{options["key"]}'
        else:
            sort_name = options['key']
        if options.get('desc', False):
            return {
                '$sort': {sort_name: -1}
            }
        else:
            return {
                '$sort': {sort_name: 1}
            }

    @classmethod
    def _make_aggregate_rules(cls, aggregate):
        """Translate the high-level stage list into a MongoDB pipeline.

        Returns (pipeline, group_key_names); group keys accumulate across
        stages so later stages can reference them via '_id.<key>'.
        """
        _aggregate_rules = []
        _group_keys = []
        if not isinstance(aggregate, list):
            raise ERROR_INVALID_PARAMETER_TYPE(key='aggregate', type='list')
        for stage in aggregate:
            if 'unwind' in stage:
                rule = cls._make_unwind_rule(stage['unwind'])
                _aggregate_rules.append(rule)
            elif 'group' in stage:
                rules, group_keys = cls._make_group_rule(stage['group'], _group_keys)
                _aggregate_rules += rules
                _group_keys += group_keys
            elif 'count' in stage:
                rule = cls._make_count_rule(stage['count'])
                _aggregate_rules.append(rule)
            elif 'sort' in stage:
                rule = cls._make_sort_rule(stage['sort'], _group_keys)
                _aggregate_rules.append(rule)
            else:
                raise ERROR_REQUIRED_PARAMETER(key='aggregate.unwind or aggregate.group or '
                                                   'aggregate.count or aggregate.sort')
        return _aggregate_rules, _group_keys

    @classmethod
    def _stat_aggregate(cls, vos, aggregate, page):
        """Run the aggregation pipeline with optional pagination.

        When paginating, the total is computed with an extra $count pass
        before $skip/$limit are appended.
        """
        result = {}
        pipeline = []
        _aggregate_rules, _group_keys = cls._make_aggregate_rules(aggregate)
        for rule in _aggregate_rules:
            pipeline.append(rule)
        if 'limit' in page and page['limit'] > 0:
            limit = page['limit']
            start = page.get('start', 1)
            start = 1 if start < 1 else start
            result['total_count'] = 0
            cursor = vos.aggregate(pipeline + [{'$count': 'total_count'}])
            for c in cursor:
                result['total_count'] = c['total_count']
                break
            if start > 1:
                pipeline.append({
                    '$skip': start - 1
                })
            pipeline.append({
                '$limit': limit
            })
        cursor = vos.aggregate(pipeline)
        result['results'] = cls._make_aggregate_values(cursor)
        return result

    @classmethod
    def _stat_distinct(cls, vos, distinct, page):
        """Return the distinct values of a field, sorted when comparable, paginated."""
        result = {}
        values = vos.distinct(distinct)
        try:
            values.sort()
        except Exception:
            # Mixed/unorderable types — deliberately best-effort; leave unsorted.
            pass
        if 'limit' in page and page['limit'] > 0:
            start = page.get('start', 1)
            if start < 1:
                start = 1
            result['total_count'] = len(values)
            values = values[start - 1:start + page['limit'] - 1]
        result['results'] = cls._make_distinct_values(values)
        return result

    @classmethod
    def stat(cls, *args, aggregate=None, distinct=None, filter=None, filter_or=None, page=None, **kwargs):
        """Statistics entry point: exactly one of ``aggregate``/``distinct`` is required.

        Raises:
            ERROR_REQUIRED_PARAMETER: neither mode was requested.
            ERROR_DB_QUERY: any underlying failure is wrapped.
        """
        if filter is None:
            filter = []
        if filter_or is None:
            filter_or = []
        if page is None:
            page = {}
        if not (aggregate or distinct):
            raise ERROR_REQUIRED_PARAMETER(key='aggregate')
        _filter = cls._make_filter(filter, filter_or)
        try:
            if cls.case_insensitive_index:
                vos = cls.objects.filter(_filter).collation({'locale': 'en', 'strength': 2})
            else:
                vos = cls.objects.filter(_filter)
            if aggregate:
                return cls._stat_aggregate(vos, aggregate, page)
            elif distinct:
                return cls._stat_distinct(vos, distinct, page)
        except Exception as e:
            raise ERROR_DB_QUERY(reason=e)
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple

import aiosqlite

from beer.consensus.block_record import BlockRecord
from beer.types.blockchain_format.sized_bytes import bytes32
from beer.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from beer.types.coin_spend import CoinSpend
from beer.types.header_block import HeaderBlock
from beer.util.db_wrapper import DBWrapper
from beer.util.ints import uint32, uint64
from beer.util.lru_cache import LRUCache
from beer.util.streamable import Streamable, streamable
from beer.wallet.block_record import HeaderBlockRecord


@dataclass(frozen=True)
@streamable
class AdditionalCoinSpends(Streamable):
    """Streamable wrapper so a list of CoinSpend can be stored as a single blob."""

    coin_spends_list: List[CoinSpend]


class WalletBlockStore:
    """
    This object handles HeaderBlocks and Blocks stored in DB used by wallet.
    """

    db: aiosqlite.Connection
    db_wrapper: DBWrapper
    block_cache: LRUCache  # header_hash -> HeaderBlockRecord, populated on reads only

    @classmethod
    async def create(cls, db_wrapper: DBWrapper):
        """Create the store and ensure all tables and indexes exist."""
        self = cls()

        self.db_wrapper = db_wrapper
        self.db = db_wrapper.db
        await self.db.execute("pragma journal_mode=wal")
        await self.db.execute("pragma synchronous=2")
        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS header_blocks(header_hash text PRIMARY KEY, height int,"
            " timestamp int, block blob)"
        )

        await self.db.execute("CREATE INDEX IF NOT EXISTS header_hash on header_blocks(header_hash)")

        await self.db.execute("CREATE INDEX IF NOT EXISTS timestamp on header_blocks(timestamp)")

        await self.db.execute("CREATE INDEX IF NOT EXISTS height on header_blocks(height)")

        # Block records
        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS block_records(header_hash "
            "text PRIMARY KEY, prev_hash text, height bigint, weight bigint, total_iters text,"
            "block blob, sub_epoch_summary blob, is_peak tinyint)"
        )

        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS additional_coin_spends(header_hash text PRIMARY KEY, spends_list_blob blob)"
        )

        # Height index so we can look up in order of height for sync purposes
        # NOTE(review): SQLite index names are database-global and an index
        # named "height" was already created on header_blocks above, so this
        # statement is presumably a no-op — confirm whether an index on
        # block_records(height) under a distinct name is wanted.
        await self.db.execute("CREATE INDEX IF NOT EXISTS height on block_records(height)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS hh on block_records(header_hash)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS peak on block_records(is_peak)")
        await self.db.commit()
        self.block_cache = LRUCache(1000)
        return self

    async def _clear_database(self):
        """Delete every stored header block (block_records is left untouched)."""
        cursor_2 = await self.db.execute("DELETE FROM header_blocks")
        await cursor_2.close()
        await self.db.commit()

    async def add_block_record(
        self,
        header_block_record: HeaderBlockRecord,
        block_record: BlockRecord,
        additional_coin_spends: List[CoinSpend],
    ):
        """
        Adds a block record to the database. This block record is assumed to be connected
        to the chain, but it may or may not be in the LCA path.
        """
        cached = self.block_cache.get(header_block_record.header_hash)
        if cached is not None:
            # Since write to db can fail, we remove from cache here to avoid potential inconsistency
            # Adding to cache only from reading
            self.block_cache.put(header_block_record.header_hash, None)

        if header_block_record.header.foliage_transaction_block is not None:
            timestamp = header_block_record.header.foliage_transaction_block.timestamp
        else:
            timestamp = uint64(0)
        cursor = await self.db.execute(
            "INSERT OR REPLACE INTO header_blocks VALUES(?, ?, ?, ?)",
            (
                header_block_record.header_hash.hex(),
                header_block_record.height,
                timestamp,
                bytes(header_block_record),
            ),
        )

        await cursor.close()

        cursor_2 = await self.db.execute(
            "INSERT OR REPLACE INTO block_records VALUES(?, ?, ?, ?, ?, ?, ?,?)",
            (
                header_block_record.header.header_hash.hex(),
                header_block_record.header.prev_header_hash.hex(),
                header_block_record.header.height,
                # weight and total_iters are 128-bit; stored as fixed-width hex
                # so lexicographic order matches numeric order
                header_block_record.header.weight.to_bytes(128 // 8, "big", signed=False).hex(),
                header_block_record.header.total_iters.to_bytes(128 // 8, "big", signed=False).hex(),
                bytes(block_record),
                None
                if block_record.sub_epoch_summary_included is None
                else bytes(block_record.sub_epoch_summary_included),
                False,
            ),
        )
        await cursor_2.close()

        if len(additional_coin_spends) > 0:
            blob: bytes = bytes(AdditionalCoinSpends(additional_coin_spends))
            cursor_3 = await self.db.execute(
                "INSERT OR REPLACE INTO additional_coin_spends VALUES(?, ?)",
                (header_block_record.header_hash.hex(), blob),
            )
            await cursor_3.close()

    async def get_header_block_at(self, heights: List[uint32]) -> List[HeaderBlock]:
        """Return the header blocks stored at each of ``heights`` (order not guaranteed)."""
        if len(heights) == 0:
            return []

        heights_db = tuple(heights)
        # BUG FIX: the placeholder expression previously reused the f-string's
        # own single-quote delimiter ({'?,' * ...}), which is a SyntaxError on
        # Python < 3.12. Use double quotes inside the replacement field.
        formatted_str = f'SELECT block from header_blocks WHERE height in ({"?," * (len(heights_db) - 1)}?)'
        cursor = await self.db.execute(formatted_str, heights_db)
        rows = await cursor.fetchall()
        await cursor.close()
        return [HeaderBlock.from_bytes(row[0]) for row in rows]

    async def get_header_block_record(self, header_hash: bytes32) -> Optional[HeaderBlockRecord]:
        """Gets a block record from the database, if present"""
        cached = self.block_cache.get(header_hash)
        if cached is not None:
            return cached
        cursor = await self.db.execute("SELECT block from header_blocks WHERE header_hash=?", (header_hash.hex(),))
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None:
            hbr: HeaderBlockRecord = HeaderBlockRecord.from_bytes(row[0])
            self.block_cache.put(hbr.header_hash, hbr)
            return hbr
        else:
            return None

    async def get_additional_coin_spends(self, header_hash: bytes32) -> Optional[List[CoinSpend]]:
        """Return the extra coin spends stored for a block, or None if none were stored."""
        cursor = await self.db.execute(
            "SELECT spends_list_blob from additional_coin_spends WHERE header_hash=?", (header_hash.hex(),)
        )
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None:
            coin_spends: AdditionalCoinSpends = AdditionalCoinSpends.from_bytes(row[0])
            return coin_spends.coin_spends_list
        else:
            return None

    async def get_block_record(self, header_hash: bytes32) -> Optional[BlockRecord]:
        """Return the BlockRecord for ``header_hash``, or None if not stored."""
        cursor = await self.db.execute(
            "SELECT block from block_records WHERE header_hash=?",
            (header_hash.hex(),),
        )
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None:
            return BlockRecord.from_bytes(row[0])
        return None

    async def get_block_records(
        self,
    ) -> Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]:
        """
        Returns a dictionary with all blocks, as well as the header hash of the peak,
        if present.
        """
        cursor = await self.db.execute("SELECT header_hash, block, is_peak from block_records")
        rows = await cursor.fetchall()
        await cursor.close()
        ret: Dict[bytes32, BlockRecord] = {}
        peak: Optional[bytes32] = None
        for row in rows:
            header_hash_bytes, block_record_bytes, is_peak = row
            header_hash = bytes.fromhex(header_hash_bytes)
            ret[header_hash] = BlockRecord.from_bytes(block_record_bytes)
            if is_peak:
                assert peak is None  # Sanity check, only one peak
                peak = header_hash
        return ret, peak

    def rollback_cache_block(self, header_hash: bytes32):
        """Evict a block from the in-memory cache (e.g. after a failed write)."""
        self.block_cache.remove(header_hash)

    async def set_peak(self, header_hash: bytes32) -> None:
        """Move the is_peak flag to ``header_hash`` (clears the previous peak first)."""
        cursor_1 = await self.db.execute("UPDATE block_records SET is_peak=0 WHERE is_peak=1")
        await cursor_1.close()
        cursor_2 = await self.db.execute(
            "UPDATE block_records SET is_peak=1 WHERE header_hash=?",
            (header_hash.hex(),),
        )
        await cursor_2.close()

    async def get_block_records_close_to_peak(
        self, blocks_n: int
    ) -> Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]:
        """
        Returns a dictionary with all blocks, as well as the header hash of the peak,
        if present.
        """

        res = await self.db.execute("SELECT header_hash, height from block_records WHERE is_peak = 1")
        row = await res.fetchone()
        await res.close()
        if row is None:
            return {}, None
        header_hash_bytes, peak_height = row
        peak: bytes32 = bytes32(bytes.fromhex(header_hash_bytes))

        formatted_str = f"SELECT header_hash, block from block_records WHERE height >= {peak_height - blocks_n}"
        cursor = await self.db.execute(formatted_str)
        rows = await cursor.fetchall()
        await cursor.close()
        ret: Dict[bytes32, BlockRecord] = {}
        for row in rows:
            header_hash_bytes, block_record_bytes = row
            header_hash = bytes.fromhex(header_hash_bytes)
            ret[header_hash] = BlockRecord.from_bytes(block_record_bytes)
        return ret, peak

    async def get_header_blocks_in_range(
        self,
        start: int,
        stop: int,
    ) -> Dict[bytes32, HeaderBlock]:
        """Return header blocks with start <= height <= stop, keyed by header hash."""

        formatted_str = f"SELECT header_hash, block from header_blocks WHERE height >= {start} and height <= {stop}"

        cursor = await self.db.execute(formatted_str)
        rows = await cursor.fetchall()
        await cursor.close()
        ret: Dict[bytes32, HeaderBlock] = {}
        for row in rows:
            header_hash_bytes, block_record_bytes = row
            header_hash = bytes.fromhex(header_hash_bytes)
            ret[header_hash] = HeaderBlock.from_bytes(block_record_bytes)

        return ret

    async def get_block_records_in_range(
        self,
        start: int,
        stop: int,
    ) -> Dict[bytes32, BlockRecord]:
        """
        Returns a dictionary with all blocks, as well as the header hash of the peak,
        if present.
        """

        formatted_str = f"SELECT header_hash, block from block_records WHERE height >= {start} and height <= {stop}"
        cursor = await self.db.execute(formatted_str)
        rows = await cursor.fetchall()
        await cursor.close()
        ret: Dict[bytes32, BlockRecord] = {}
        for row in rows:
            header_hash_bytes, block_record_bytes = row
            header_hash = bytes.fromhex(header_hash_bytes)
            ret[header_hash] = BlockRecord.from_bytes(block_record_bytes)

        return ret

    async def get_peak_heights_dicts(self) -> Tuple[Dict[uint32, bytes32], Dict[uint32, SubEpochSummary]]:
        """
        Returns a dictionary with all blocks, as well as the header hash of the peak,
        if present.
        """

        res = await self.db.execute("SELECT header_hash from block_records WHERE is_peak = 1")
        row = await res.fetchone()
        await res.close()
        if row is None:
            # no blocks in the DB
            return {}, {}

        peak: bytes32 = bytes.fromhex(row[0])
        cursor = await self.db.execute("SELECT header_hash,prev_hash,height,sub_epoch_summary from block_records")
        rows = await cursor.fetchall()
        await cursor.close()
        hash_to_prev_hash: Dict[bytes32, bytes32] = {}
        hash_to_height: Dict[bytes32, uint32] = {}
        hash_to_summary: Dict[bytes32, SubEpochSummary] = {}

        for row in rows:
            hash_to_prev_hash[bytes.fromhex(row[0])] = bytes.fromhex(row[1])
            hash_to_height[bytes.fromhex(row[0])] = row[2]
            if row[3] is not None:
                hash_to_summary[bytes.fromhex(row[0])] = SubEpochSummary.from_bytes(row[3])

        height_to_hash: Dict[uint32, bytes32] = {}
        sub_epoch_summaries: Dict[uint32, SubEpochSummary] = {}

        # Walk backwards from the peak to genesis so only main-chain blocks
        # end up in the returned maps.
        curr_header_hash = peak
        curr_height = hash_to_height[curr_header_hash]
        while True:
            height_to_hash[curr_height] = curr_header_hash
            if curr_header_hash in hash_to_summary:
                sub_epoch_summaries[curr_height] = hash_to_summary[curr_header_hash]
            if curr_height == 0:
                break
            curr_header_hash = hash_to_prev_hash[curr_header_hash]
            curr_height = hash_to_height[curr_header_hash]
        return height_to_hash, sub_epoch_summaries
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple

import aiosqlite

from beer.consensus.block_record import BlockRecord
from beer.types.blockchain_format.sized_bytes import bytes32
from beer.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from beer.types.coin_spend import CoinSpend
from beer.types.header_block import HeaderBlock
from beer.util.db_wrapper import DBWrapper
from beer.util.ints import uint32, uint64
from beer.util.lru_cache import LRUCache
from beer.util.streamable import Streamable, streamable
from beer.wallet.block_record import HeaderBlockRecord


@dataclass(frozen=True)
@streamable
class AdditionalCoinSpends(Streamable):
    # Serialization wrapper: lets a whole list of CoinSpend round-trip as one blob.
    coin_spends_list: List[CoinSpend]


class WalletBlockStore:
    """
    This object handles HeaderBlocks and Blocks stored in DB used by wallet.
    """

    db: aiosqlite.Connection
    db_wrapper: DBWrapper
    # Read-through cache of HeaderBlockRecord keyed by header hash; entries are
    # only inserted on reads (see add_block_record / get_header_block_record).
    block_cache: LRUCache

    @classmethod
    async def create(cls, db_wrapper: DBWrapper):
        """Async factory: bind the shared connection and create tables/indexes."""
        self = cls()
        self.db_wrapper = db_wrapper
        self.db = db_wrapper.db
        await self.db.execute("pragma journal_mode=wal")
        await self.db.execute("pragma synchronous=2")
        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS header_blocks(header_hash text PRIMARY KEY, height int,"
            " timestamp int, block blob)"
        )
        await self.db.execute("CREATE INDEX IF NOT EXISTS header_hash on header_blocks(header_hash)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS timestamp on header_blocks(timestamp)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS height on header_blocks(height)")
        # Block records
        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS block_records(header_hash "
            "text PRIMARY KEY, prev_hash text, height bigint, weight bigint, total_iters text,"
            "block blob, sub_epoch_summary blob, is_peak tinyint)"
        )
        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS additional_coin_spends(header_hash text PRIMARY KEY, spends_list_blob blob)"
        )
        # Height index so we can look up in order of height for sync purposes
        # NOTE(review): SQLite index names are database-global; an index named
        # "height" already exists on header_blocks, so this is presumably a
        # no-op — confirm whether block_records(height) needs its own index.
        await self.db.execute("CREATE INDEX IF NOT EXISTS height on block_records(height)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS hh on block_records(header_hash)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS peak on block_records(is_peak)")
        await self.db.commit()
        self.block_cache = LRUCache(1000)
        return self

    async def _clear_database(self):
        # Wipes header_blocks only; block_records/additional_coin_spends remain.
        cursor_2 = await self.db.execute("DELETE FROM header_blocks")
        await cursor_2.close()
        await self.db.commit()

    async def add_block_record(
        self,
        header_block_record: HeaderBlockRecord,
        block_record: BlockRecord,
        additional_coin_spends: List[CoinSpend],
    ):
        """
        Adds a block record to the database. This block record is assumed to be connected
        to the chain, but it may or may not be in the LCA path.
        """
        cached = self.block_cache.get(header_block_record.header_hash)
        if cached is not None:
            # Since write to db can fail, we remove from cache here to avoid potential inconsistency
            # Adding to cache only from reading
            self.block_cache.put(header_block_record.header_hash, None)
        if header_block_record.header.foliage_transaction_block is not None:
            timestamp = header_block_record.header.foliage_transaction_block.timestamp
        else:
            timestamp = uint64(0)
        cursor = await self.db.execute(
            "INSERT OR REPLACE INTO header_blocks VALUES(?, ?, ?, ?)",
            (
                header_block_record.header_hash.hex(),
                header_block_record.height,
                timestamp,
                bytes(header_block_record),
            ),
        )
        await cursor.close()
        cursor_2 = await self.db.execute(
            "INSERT OR REPLACE INTO block_records VALUES(?, ?, ?, ?, ?, ?, ?,?)",
            (
                header_block_record.header.header_hash.hex(),
                header_block_record.header.prev_header_hash.hex(),
                header_block_record.header.height,
                # 128-bit ints stored as fixed-width big-endian hex strings
                header_block_record.header.weight.to_bytes(128 // 8, "big", signed=False).hex(),
                header_block_record.header.total_iters.to_bytes(128 // 8, "big", signed=False).hex(),
                bytes(block_record),
                None
                if block_record.sub_epoch_summary_included is None
                else bytes(block_record.sub_epoch_summary_included),
                False,
            ),
        )
        await cursor_2.close()
        if len(additional_coin_spends) > 0:
            blob: bytes = bytes(AdditionalCoinSpends(additional_coin_spends))
            cursor_3 = await self.db.execute(
                "INSERT OR REPLACE INTO additional_coin_spends VALUES(?, ?)",
                (header_block_record.header_hash.hex(), blob),
            )
            await cursor_3.close()

    async def get_header_block_at(self, heights: List[uint32]) -> List[HeaderBlock]:
        # Fetch header blocks for a batch of heights with one IN (...) query;
        # heights are bound as parameters, only the '?' list is interpolated.
        if len(heights) == 0:
            return []
        heights_db = tuple(heights)
        formatted_str = f'SELECT block from header_blocks WHERE height in ({"?," * (len(heights_db) - 1)}?)'
        cursor = await self.db.execute(formatted_str, heights_db)
        rows = await cursor.fetchall()
        await cursor.close()
        return [HeaderBlock.from_bytes(row[0]) for row in rows]

    async def get_header_block_record(self, header_hash: bytes32) -> Optional[HeaderBlockRecord]:
        """Gets a block record from the database, if present"""
        cached = self.block_cache.get(header_hash)
        if cached is not None:
            return cached
        cursor = await self.db.execute("SELECT block from header_blocks WHERE header_hash=?", (header_hash.hex(),))
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None:
            hbr: HeaderBlockRecord = HeaderBlockRecord.from_bytes(row[0])
            self.block_cache.put(hbr.header_hash, hbr)
            return hbr
        else:
            return None

    async def get_additional_coin_spends(self, header_hash: bytes32) -> Optional[List[CoinSpend]]:
        # Returns the stored extra coin spends, or None when the block has none.
        cursor = await self.db.execute(
            "SELECT spends_list_blob from additional_coin_spends WHERE header_hash=?", (header_hash.hex(),)
        )
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None:
            coin_spends: AdditionalCoinSpends = AdditionalCoinSpends.from_bytes(row[0])
            return coin_spends.coin_spends_list
        else:
            return None

    async def get_block_record(self, header_hash: bytes32) -> Optional[BlockRecord]:
        # Single BlockRecord lookup by header hash; None when absent.
        cursor = await self.db.execute(
            "SELECT block from block_records WHERE header_hash=?",
            (header_hash.hex(),),
        )
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None:
            return BlockRecord.from_bytes(row[0])
        return None

    async def get_block_records(
        self,
    ) -> Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]:
        """
        Returns a dictionary with all blocks, as well as the header hash of the peak,
        if present.
        """
        cursor = await self.db.execute("SELECT header_hash, block, is_peak from block_records")
        rows = await cursor.fetchall()
        await cursor.close()
        ret: Dict[bytes32, BlockRecord] = {}
        peak: Optional[bytes32] = None
        for row in rows:
            header_hash_bytes, block_record_bytes, is_peak = row
            header_hash = bytes.fromhex(header_hash_bytes)
            ret[header_hash] = BlockRecord.from_bytes(block_record_bytes)
            if is_peak:
                assert peak is None  # Sanity check, only one peak
                peak = header_hash
        return ret, peak

    def rollback_cache_block(self, header_hash: bytes32):
        # Drop a cached header block, e.g. when a write has to be rolled back.
        self.block_cache.remove(header_hash)

    async def set_peak(self, header_hash: bytes32) -> None:
        # Clear the old peak flag, then mark the new peak.
        cursor_1 = await self.db.execute("UPDATE block_records SET is_peak=0 WHERE is_peak=1")
        await cursor_1.close()
        cursor_2 = await self.db.execute(
            "UPDATE block_records SET is_peak=1 WHERE header_hash=?",
            (header_hash.hex(),),
        )
        await cursor_2.close()

    async def get_block_records_close_to_peak(
        self, blocks_n: int
    ) -> Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]:
        """
        Returns a dictionary with all blocks, as well as the header hash of the peak,
        if present.
        """
        res = await self.db.execute("SELECT header_hash, height from block_records WHERE is_peak = 1")
        row = await res.fetchone()
        await res.close()
        if row is None:
            return {}, None
        header_hash_bytes, peak_height = row
        peak: bytes32 = bytes32(bytes.fromhex(header_hash_bytes))
        # Only interpolates trusted integers (peak_height/blocks_n), not user input.
        formatted_str = f"SELECT header_hash, block from block_records WHERE height >= {peak_height - blocks_n}"
        cursor = await self.db.execute(formatted_str)
        rows = await cursor.fetchall()
        await cursor.close()
        ret: Dict[bytes32, BlockRecord] = {}
        for row in rows:
            header_hash_bytes, block_record_bytes = row
            header_hash = bytes.fromhex(header_hash_bytes)
            ret[header_hash] = BlockRecord.from_bytes(block_record_bytes)
        return ret, peak

    async def get_header_blocks_in_range(
        self,
        start: int,
        stop: int,
    ) -> Dict[bytes32, HeaderBlock]:
        # Header blocks with start <= height <= stop, keyed by header hash.
        formatted_str = f"SELECT header_hash, block from header_blocks WHERE height >= {start} and height <= {stop}"
        cursor = await self.db.execute(formatted_str)
        rows = await cursor.fetchall()
        await cursor.close()
        ret: Dict[bytes32, HeaderBlock] = {}
        for row in rows:
            header_hash_bytes, block_record_bytes = row
            header_hash = bytes.fromhex(header_hash_bytes)
            ret[header_hash] = HeaderBlock.from_bytes(block_record_bytes)
        return ret

    async def get_block_records_in_range(
        self,
        start: int,
        stop: int,
    ) -> Dict[bytes32, BlockRecord]:
        """
        Returns a dictionary with all blocks, as well as the header hash of the peak,
        if present.
        """
        formatted_str = f"SELECT header_hash, block from block_records WHERE height >= {start} and height <= {stop}"
        cursor = await self.db.execute(formatted_str)
        rows = await cursor.fetchall()
        await cursor.close()
        ret: Dict[bytes32, BlockRecord] = {}
        for row in rows:
            header_hash_bytes, block_record_bytes = row
            header_hash = bytes.fromhex(header_hash_bytes)
            ret[header_hash] = BlockRecord.from_bytes(block_record_bytes)
        return ret

    async def get_peak_heights_dicts(self) -> Tuple[Dict[uint32, bytes32], Dict[uint32, SubEpochSummary]]:
        """
        Returns a dictionary with all blocks, as well as the header hash of the peak,
        if present.
        """
        res = await self.db.execute("SELECT header_hash from block_records WHERE is_peak = 1")
        row = await res.fetchone()
        await res.close()
        if row is None:
            return {}, {}
        peak: bytes32 = bytes.fromhex(row[0])
        cursor = await self.db.execute("SELECT header_hash,prev_hash,height,sub_epoch_summary from block_records")
        rows = await cursor.fetchall()
        await cursor.close()
        hash_to_prev_hash: Dict[bytes32, bytes32] = {}
        hash_to_height: Dict[bytes32, uint32] = {}
        hash_to_summary: Dict[bytes32, SubEpochSummary] = {}
        for row in rows:
            hash_to_prev_hash[bytes.fromhex(row[0])] = bytes.fromhex(row[1])
            hash_to_height[bytes.fromhex(row[0])] = row[2]
            if row[3] is not None:
                hash_to_summary[bytes.fromhex(row[0])] = SubEpochSummary.from_bytes(row[3])
        height_to_hash: Dict[uint32, bytes32] = {}
        sub_epoch_summaries: Dict[uint32, SubEpochSummary] = {}
        # Walk the prev-hash chain from the peak down to height 0 so that only
        # main-chain blocks contribute to the returned maps.
        curr_header_hash = peak
        curr_height = hash_to_height[curr_header_hash]
        while True:
            height_to_hash[curr_height] = curr_header_hash
            if curr_header_hash in hash_to_summary:
                sub_epoch_summaries[curr_height] = hash_to_summary[curr_header_hash]
            if curr_height == 0:
                break
            curr_header_hash = hash_to_prev_hash[curr_header_hash]
            curr_height = hash_to_height[curr_header_hash]
        return height_to_hash, sub_epoch_summaries
import json import logging from abc import ABC import slack_actions from ws_sdk.web import WS import slack_format class Report(ABC): def __init__(self, ws_conn_details: dict, config: dict, ws_connector: WS): self.ws_conn_details = ws_conn_details self.config = config self.ws_connector = ws_connector self.execute() if is_valid_config(self.ws_conn_details, self.mandatory_values, self.config['MandatoryWsConnProp'] + self.MANDATORY_PROPS) else KeyError def create_report_metadata(self) -> tuple: scope = self.ws_connector.get_scope_by_token(token=self.ws_conn_details['ws_scope_token']) channel = self.get_channel_name(scope_type=scope['type'], scope_token=self.ws_conn_details['ws_scope_token']) header_text = f"{scope["type"]} {scope["name"]} {self.report_name}" return scope, channel, header_text def get_channel_name(self, scope_type, scope_token) -> str: channel_name = f"{self.config["ChannelPrefix"]}{scope_type}_{self.ws_connector.get_scope_name_by_token(token=scope_token)}" return slack_actions.fix_slack_channel_name(channel_name) class Alerts(Report): MANDATORY_PROPS = ['ws_scope_token'] report_name = "Library Vulnerability report" def execute(self): alerts = self.ws_connector.get_alerts(token=self.ws_conn_details['ws_scope_token']) scope, channel, header_text = self.create_report_metadata() block = [] slack_actions.send_to_slack(channel=channel, block=json.dumps(block)) logging.info(f"Successfully executed: {self.report_name}") class LibVulnerabilities(Report): MANDATORY_PROPS = ['ws_scope_token'] report_name = "Library Vulnerability report" def execute(self): libs = self.ws_connector.get_vulnerabilities_per_lib(token=self.ws_conn_details['ws_scope_token']) scope, channel, header_text = self.create_report_metadata() block = slack_format.create_lib_vul_block(header_text, libs) slack_actions.send_to_slack(channel=channel, block=json.dumps(block)) logging.info(f"Successfully executed: {self.report_name}") def is_valid_config(matched_dict, mandatory_keys): # TODO: Add 
syntax validation and perhaps connectivity test? for key in mandatory_keys: if matched_dict.get(key) is None: logging.error(f"Missing {key}") return False return True
import json import logging from abc import ABC import slack_actions from ws_sdk.web import WS import slack_format class Report(ABC): def __init__(self, ws_conn_details: dict, config: dict, ws_connector: WS): self.ws_conn_details = ws_conn_details self.config = config self.ws_connector = ws_connector self.execute() if is_valid_config(self.ws_conn_details, self.mandatory_values, self.config['MandatoryWsConnProp'] + self.MANDATORY_PROPS) else KeyError def create_report_metadata(self) -> tuple: scope = self.ws_connector.get_scope_by_token(token=self.ws_conn_details['ws_scope_token']) channel = self.get_channel_name(scope_type=scope['type'], scope_token=self.ws_conn_details['ws_scope_token']) header_text = f"{scope['type']} {scope['name']} {self.report_name}" return scope, channel, header_text def get_channel_name(self, scope_type, scope_token) -> str: channel_name = f"{self.config['ChannelPrefix']}{scope_type}_{self.ws_connector.get_scope_name_by_token(token=scope_token)}" return slack_actions.fix_slack_channel_name(channel_name) class Alerts(Report): MANDATORY_PROPS = ['ws_scope_token'] report_name = "Library Vulnerability report" def execute(self): alerts = self.ws_connector.get_alerts(token=self.ws_conn_details['ws_scope_token']) scope, channel, header_text = self.create_report_metadata() block = [] slack_actions.send_to_slack(channel=channel, block=json.dumps(block)) logging.info(f"Successfully executed: {self.report_name}") class LibVulnerabilities(Report): MANDATORY_PROPS = ['ws_scope_token'] report_name = "Library Vulnerability report" def execute(self): libs = self.ws_connector.get_vulnerabilities_per_lib(token=self.ws_conn_details['ws_scope_token']) scope, channel, header_text = self.create_report_metadata() block = slack_format.create_lib_vul_block(header_text, libs) slack_actions.send_to_slack(channel=channel, block=json.dumps(block)) logging.info(f"Successfully executed: {self.report_name}") def is_valid_config(matched_dict, mandatory_keys): # TODO: Add 
syntax validation and perhaps connectivity test? for key in mandatory_keys: if matched_dict.get(key) is None: logging.error(f"Missing {key}") return False return True
import _thread import argparse import logging import os import time import webbrowser from threading import Thread from cryptography.hazmat.backends import default_backend as crypto_default_backend from cryptography.hazmat.primitives import serialization as crypto_serialization from cryptography.hazmat.primitives.asymmetric import rsa from flask import Flask, request, jsonify from flask_cors import CORS, cross_origin from git import Repo, InvalidGitRepositoryError from halo import Halo from sshconf import read_ssh_config, empty_ssh_config_file from waitress import serve from starmart.config.config import Config class Action(object): def __init__(self, args): self.config = Config.default_config() self.args = args def act(self): raise NotImplementedError(f'act not implemented in {type(self).__name__}') @classmethod def get_action(cls): actions = dict({ 'deploy': DeployAction, 'init': InitAction, 'clone': CloneAction }) args = cls.__parse_arguments__() action = actions.get(args.action[0]) if action is None: raise ValueError('Action should be deploy, init or clone') return action(args) @classmethod def __parse_arguments__(cls): # configuring arguments parser = argparse.ArgumentParser() parser.add_argument('action', nargs=1, type=str, default='None', help='Run init on a new project, deploy to push the code or clone <project_id> to retrieve an existing project') parser.add_argument('project_id', nargs='?', help='The project id', default=None) return parser.parse_args() class InitAction(Action): def act(self): self.__auth_with_web_browser__() def __auth_with_web_browser__(self): webbrowser.open(f'{self.config.authentication_host()}/development/login') self.__start_server__() def __start_server__(self): """ This method blocks but all requests end using the `exit_after_seconds()` function """ app = Flask(__name__) CORS(app) app.config['CORS_HEADERS'] = 'Content-Type' log = logging.getLogger('werkzeug') log.setLevel(logging.ERROR) spinner = Halo(text='Waiting for browser 
authorization', spinner='dots') spinner.start() @app.route('/set-remote', methods=['POST']) @cross_origin() def set_remote(): public_key = self.__get_or_create_ssh_public_key__() spinner.stop() self.__clone_after_webbrowser_auth__(request.json['remote']) return jsonify({'publicKey': public_key}) @app.route('/set-clone', methods=['POST']) @cross_origin() def set_clone(): spinner.stop() print(f'You already have an existing empty repository. Try calling', bold(f'starmart clone {request.json['repo_id']}')) exit_after_seconds() return jsonify({'status': 'ok'}) serve(app, host="127.0.0.1", port=4999) def __clone_after_webbrowser_auth__(self, url: str): remote_host = self.config.git_remote_host() if not url.startswith(remote_host): raise ValueError(f'URL does not match the authentication host: {remote_host}') repo = self.__do_clone_default_code__() repo.create_remote('starmart', url=url) print('Happy coding!') # this is needed to exit flask server -> first it needs to return and then exit exit_after_seconds() @Halo(text='Cloning starter code repo', spinner='dots') def __do_clone_default_code__(self): cloned = Repo.clone_from(self.config.github_repo(), 'starter_code') for r in cloned.remotes: if r.name == 'origin': cloned.delete_remote(r) break return cloned def __get_or_create_ssh_public_key__(self): home = os.path.expanduser('~') ssh_dir = os.path.join(home, '.ssh') config_path = os.path.join(home, '.ssh', 'config') if not os.path.exists(ssh_dir): os.mkdir(ssh_dir) if not os.path.exists(config_path): config_file = empty_ssh_config_file() public_key = self.__create_and_write_ssh_keypair_and_update_config__(config_file) config_file.write(config_path) return public_key config_file = read_ssh_config(config_path) if self.config.user_git_host() in config_file.hosts(): git_ssh_config = config_file.host(self.config.user_git_host()) public_key_path = f"{git_ssh_config["identityfile"]}.pub" with open(public_key_path, 'r') as f: return ''.join(f.readlines()) public_key = 
self.__create_and_write_ssh_keypair_and_update_config__(config_file) config_file.save() return public_key def __create_and_write_ssh_keypair_and_update_config__(self, config_file): key = rsa.generate_private_key( backend=crypto_default_backend(), public_exponent=65537, key_size=2048 ) private_key = key.private_bytes( crypto_serialization.Encoding.PEM, crypto_serialization.PrivateFormat.PKCS8, crypto_serialization.NoEncryption() ) public_key = key.public_key().public_bytes( crypto_serialization.Encoding.OpenSSH, crypto_serialization.PublicFormat.OpenSSH ) home = os.path.expanduser('~') ssh_dir = os.path.join(home, '.ssh') # the file is opened this way in order to be created with the correct permissions with open(os.open(os.path.join(ssh_dir, 'gitlab'), os.O_CREAT | os.O_WRONLY, 0o400), 'wb') as f: f.write(private_key) with open(os.path.join(ssh_dir, 'gitlab.pub'), 'wb') as f: f.write(public_key) config_file.add(self.config.user_git_host(), Hostname=self.config.user_git_host(), IdentityFile=os.path.join(ssh_dir, 'gitlab')) return public_key.decode('utf-8') class DeployAction(Action): def act(self): self.__configure_repo__() @Halo(text='Pushing latest commits', spinner='dots') def __configure_repo__(self): try: repo = Repo('.') remote = None for r in repo.remotes: if r.name == 'starmart': remote = r break if remote is None: raise ValueError(f'The repository does not contain the starmart remote. Please call' + f' {bold('starmart init')}, before calling {bold('starmart deploy')}.') remote.push(refspec="main:main") except InvalidGitRepositoryError: raise ValueError('Github repository not initialized. Call starmart init before calling starmart deploy.') finally: print('\nPushed. 
Happy coding!') class CloneAction(Action): def act(self): self.__clone_repo__() def __clone_repo__(self): project_id = self.args.project_id if project_id is None: raise ValueError(bold('starmart clone') + ' needs the project id') spinner = Halo(text=f'Cloning project {project_id}', spinner='dots') spinner.start() repo = Repo.clone_from(f'{self.config.git_remote_host()}/{project_id}.git', f'starmart_project_{project_id}') repo.remote('origin').rename('starmart') spinner.stop() print('Cloned. Happy coding!') def exit_after_seconds(seconds=2): def do_exit(): time.sleep(seconds) _thread.interrupt_main() Thread(target=do_exit).start() def bold(text): return '\033[1m' + text + '\033[0m' def main(): Action.get_action().act() if __name__ == '__main__': try: main() except KeyboardInterrupt: exit(0)
import _thread import argparse import logging import os import time import webbrowser from threading import Thread from cryptography.hazmat.backends import default_backend as crypto_default_backend from cryptography.hazmat.primitives import serialization as crypto_serialization from cryptography.hazmat.primitives.asymmetric import rsa from flask import Flask, request, jsonify from flask_cors import CORS, cross_origin from git import Repo, InvalidGitRepositoryError from halo import Halo from sshconf import read_ssh_config, empty_ssh_config_file from waitress import serve from starmart.config.config import Config class Action(object): def __init__(self, args): self.config = Config.default_config() self.args = args def act(self): raise NotImplementedError(f'act not implemented in {type(self).__name__}') @classmethod def get_action(cls): actions = dict({ 'deploy': DeployAction, 'init': InitAction, 'clone': CloneAction }) args = cls.__parse_arguments__() action = actions.get(args.action[0]) if action is None: raise ValueError('Action should be deploy, init or clone') return action(args) @classmethod def __parse_arguments__(cls): # configuring arguments parser = argparse.ArgumentParser() parser.add_argument('action', nargs=1, type=str, default='None', help='Run init on a new project, deploy to push the code or clone <project_id> to retrieve an existing project') parser.add_argument('project_id', nargs='?', help='The project id', default=None) return parser.parse_args() class InitAction(Action): def act(self): self.__auth_with_web_browser__() def __auth_with_web_browser__(self): webbrowser.open(f'{self.config.authentication_host()}/development/login') self.__start_server__() def __start_server__(self): """ This method blocks but all requests end using the `exit_after_seconds()` function """ app = Flask(__name__) CORS(app) app.config['CORS_HEADERS'] = 'Content-Type' log = logging.getLogger('werkzeug') log.setLevel(logging.ERROR) spinner = Halo(text='Waiting for browser 
authorization', spinner='dots') spinner.start() @app.route('/set-remote', methods=['POST']) @cross_origin() def set_remote(): public_key = self.__get_or_create_ssh_public_key__() spinner.stop() self.__clone_after_webbrowser_auth__(request.json['remote']) return jsonify({'publicKey': public_key}) @app.route('/set-clone', methods=['POST']) @cross_origin() def set_clone(): spinner.stop() print(f'You already have an existing empty repository. Try calling', bold(f'starmart clone {request.json["repo_id"]}')) exit_after_seconds() return jsonify({'status': 'ok'}) serve(app, host="127.0.0.1", port=4999) def __clone_after_webbrowser_auth__(self, url: str): remote_host = self.config.git_remote_host() if not url.startswith(remote_host): raise ValueError(f'URL does not match the authentication host: {remote_host}') repo = self.__do_clone_default_code__() repo.create_remote('starmart', url=url) print('Happy coding!') # this is needed to exit flask server -> first it needs to return and then exit exit_after_seconds() @Halo(text='Cloning starter code repo', spinner='dots') def __do_clone_default_code__(self): cloned = Repo.clone_from(self.config.github_repo(), 'starter_code') for r in cloned.remotes: if r.name == 'origin': cloned.delete_remote(r) break return cloned def __get_or_create_ssh_public_key__(self): home = os.path.expanduser('~') ssh_dir = os.path.join(home, '.ssh') config_path = os.path.join(home, '.ssh', 'config') if not os.path.exists(ssh_dir): os.mkdir(ssh_dir) if not os.path.exists(config_path): config_file = empty_ssh_config_file() public_key = self.__create_and_write_ssh_keypair_and_update_config__(config_file) config_file.write(config_path) return public_key config_file = read_ssh_config(config_path) if self.config.user_git_host() in config_file.hosts(): git_ssh_config = config_file.host(self.config.user_git_host()) public_key_path = f"{git_ssh_config['identityfile']}.pub" with open(public_key_path, 'r') as f: return ''.join(f.readlines()) public_key = 
self.__create_and_write_ssh_keypair_and_update_config__(config_file) config_file.save() return public_key def __create_and_write_ssh_keypair_and_update_config__(self, config_file): key = rsa.generate_private_key( backend=crypto_default_backend(), public_exponent=65537, key_size=2048 ) private_key = key.private_bytes( crypto_serialization.Encoding.PEM, crypto_serialization.PrivateFormat.PKCS8, crypto_serialization.NoEncryption() ) public_key = key.public_key().public_bytes( crypto_serialization.Encoding.OpenSSH, crypto_serialization.PublicFormat.OpenSSH ) home = os.path.expanduser('~') ssh_dir = os.path.join(home, '.ssh') # the file is opened this way in order to be created with the correct permissions with open(os.open(os.path.join(ssh_dir, 'gitlab'), os.O_CREAT | os.O_WRONLY, 0o400), 'wb') as f: f.write(private_key) with open(os.path.join(ssh_dir, 'gitlab.pub'), 'wb') as f: f.write(public_key) config_file.add(self.config.user_git_host(), Hostname=self.config.user_git_host(), IdentityFile=os.path.join(ssh_dir, 'gitlab')) return public_key.decode('utf-8') class DeployAction(Action): def act(self): self.__configure_repo__() @Halo(text='Pushing latest commits', spinner='dots') def __configure_repo__(self): try: repo = Repo('.') remote = None for r in repo.remotes: if r.name == 'starmart': remote = r break if remote is None: raise ValueError(f'The repository does not contain the starmart remote. Please call' + f' {bold("starmart init")}, before calling {bold("starmart deploy")}.') remote.push(refspec="main:main") except InvalidGitRepositoryError: raise ValueError('Github repository not initialized. Call starmart init before calling starmart deploy.') finally: print('\nPushed. 
Happy coding!') class CloneAction(Action): def act(self): self.__clone_repo__() def __clone_repo__(self): project_id = self.args.project_id if project_id is None: raise ValueError(bold('starmart clone') + ' needs the project id') spinner = Halo(text=f'Cloning project {project_id}', spinner='dots') spinner.start() repo = Repo.clone_from(f'{self.config.git_remote_host()}/{project_id}.git', f'starmart_project_{project_id}') repo.remote('origin').rename('starmart') spinner.stop() print('Cloned. Happy coding!') def exit_after_seconds(seconds=2): def do_exit(): time.sleep(seconds) _thread.interrupt_main() Thread(target=do_exit).start() def bold(text): return '\033[1m' + text + '\033[0m' def main(): Action.get_action().act() if __name__ == '__main__': try: main() except KeyboardInterrupt: exit(0)
# Copyright (c) 2021 elParaguayo # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import os import shutil import subprocess import libqtile.bar import libqtile.config import libqtile.confreader import libqtile.layout import pytest from dbus_next.constants import BusType from qtile_extras.widget.upower import UPowerWidget from test.helpers import Retry # noqa: I001 @Retry(ignore_exceptions=(AssertionError, )) def battery_found(manager): """Waits for widget to report batteries.""" _, output = manager.c.widget["upowerwidget"].eval("len(self.batteries)") while int(output) == 0: # If there are no batteries (shouldn't happen) try looking again. 
manager.c.widget["upowerwidget"].eval("import asyncio;asyncio.create_task(self._find_batteries())") assert False assert True @Retry(ignore_exceptions=(AssertionError, )) def text_hidden(manager, target): """Waits for widget to hide text.""" assert manager.c.widget["upowerwidget"].info()["width"] == target @pytest.fixture def powerwidget(monkeypatch): """Patch the widget to use the fake dbus service.""" monkeypatch.setattr("qtile_extras.widget.upower.UPOWER_SERVICE", "test.qtileextras.upower") monkeypatch.setattr("qtile_extras.widget.upower.UPOWER_BUS", BusType.SESSION) yield UPowerWidget @pytest.fixture(scope="function") def powerconfig(request, powerwidget): """Config for the UPower widget. Parameters set via request.""" class PowerConfig(libqtile.confreader.Config): auto_fullscreen = True keys = [ ] mouse = [] groups = [ libqtile.config.Group("a"), ] layouts = [libqtile.layout.Max()] floating_layout = libqtile.resources.default_config.floating_layout screens = [ libqtile.config.Screen( top=libqtile.bar.Bar( [ powerwidget(**getattr(request, "param", dict())) ], 50, ), ) ] yield PowerConfig upower_dbus_servive = pytest.mark.usefixtures("dbus_thread") @upower_dbus_servive def test_upower_all_batteries(manager_nospawn, powerconfig): manager_nospawn.start(powerconfig) battery_found(manager_nospawn) assert len(manager_nospawn.c.widget["upowerwidget"].info()["batteries"]) == 2 @upower_dbus_servive @pytest.mark.parametrize("powerconfig", [{"battery_name": "BAT1"}], indirect=True) def test_upower_named_battery(manager_nospawn, powerconfig): manager_nospawn.start(powerconfig) battery_found(manager_nospawn) assert len(manager_nospawn.c.widget["upowerwidget"].info()["batteries"]) == 1 assert manager_nospawn.c.widget["upowerwidget"].info()["batteries"][0]["name"] == "BAT1" assert manager_nospawn.c.widget["upowerwidget"].info()["batteries"][0]["status"] == "Normal" @upower_dbus_servive @pytest.mark.parametrize("powerconfig", [{"battery_name": "BAT1", "percentage_low": 0.6}], 
indirect=True) def test_upower_low_battery(manager_nospawn, powerconfig): manager_nospawn.start(powerconfig) battery_found(manager_nospawn) assert len(manager_nospawn.c.widget["upowerwidget"].info()["batteries"]) == 1 assert manager_nospawn.c.widget["upowerwidget"].info()["batteries"][0]["status"] == "Low" @upower_dbus_servive @pytest.mark.parametrize( "powerconfig", [{"battery_name": "BAT1", "percentage_low": 0.7, "percentage_critical": 0.55}], indirect=True ) def test_upower_critical_battery(manager_nospawn, powerconfig): manager_nospawn.start(powerconfig) battery_found(manager_nospawn) assert len(manager_nospawn.c.widget["upowerwidget"].info()["batteries"]) == 1 assert manager_nospawn.c.widget["upowerwidget"].info()["batteries"][0]["status"] == "Critical" @upower_dbus_servive @pytest.mark.parametrize("powerconfig", [{"battery_name": "BAT1"}], indirect=True) def test_upower_charging(manager_nospawn, powerconfig): manager_nospawn.start(powerconfig) battery_found(manager_nospawn) assert len(manager_nospawn.c.widget["upowerwidget"].info()["batteries"]) == 1 assert manager_nospawn.c.widget["upowerwidget"].info()["batteries"][0]["name"] == "BAT1" assert not manager_nospawn.c.widget["upowerwidget"].info()["charging"] assert manager_nospawn.c.widget["upowerwidget"].info()["batteries"][0]["tte"] == "3:23" assert not manager_nospawn.c.widget["upowerwidget"].info()["batteries"][0]["ttf"] # Trigger our method to toggle the charging state of the batteries dbussend = shutil.which("dbus-send") subprocess.run([ dbussend, f"--bus={os.environ["DBUS_SESSION_BUS_ADDRESS"]}", "--type=method_call", "--dest=test.qtileextras.upower", "/org/freedesktop/UPower", "org.freedesktop.UPower.toggle_charge" ]) assert manager_nospawn.c.widget["upowerwidget"].info()["charging"] assert manager_nospawn.c.widget["upowerwidget"].info()["batteries"][0]["ttf"] == "1:03" assert not manager_nospawn.c.widget["upowerwidget"].info()["batteries"][0]["tte"] @upower_dbus_servive 
@pytest.mark.parametrize("powerconfig", [{"battery_name": "BAT1", "text_displaytime": 0.5}], indirect=True) def test_upower_show_text(manager_nospawn, powerconfig): manager_nospawn.start(powerconfig) battery_found(manager_nospawn) assert len(manager_nospawn.c.widget["upowerwidget"].info()["batteries"]) == 1 assert manager_nospawn.c.widget["upowerwidget"].info()["batteries"][0]["name"] == "BAT1" orig_width = manager_nospawn.c.widget["upowerwidget"].info()["width"] # Click on widget shows text so it should be wider now manager_nospawn.c.bar["top"].fake_button_press(0, "top", 0, 0, 1) assert manager_nospawn.c.widget["upowerwidget"].info()["width"] != orig_width # Click again to hide text so it's back to original width manager_nospawn.c.bar["top"].fake_button_press(0, "top", 0, 0, 1) assert manager_nospawn.c.widget["upowerwidget"].info()["width"] == orig_width # Check this still works when battery is charging # Trigger our method to toggle the charging state of the batteries dbussend = shutil.which("dbus-send") subprocess.run([ dbussend, f"--bus={os.environ["DBUS_SESSION_BUS_ADDRESS"]}", "--type=method_call", "--dest=test.qtileextras.upower", "/org/freedesktop/UPower", "org.freedesktop.UPower.toggle_charge" ]) # Click on widget shows text so it should be wider now manager_nospawn.c.bar["top"].fake_button_press(0, "top", 0, 0, 1) assert manager_nospawn.c.widget["upowerwidget"].info()["width"] != orig_width # Let the timer hide the text text_hidden(manager_nospawn, orig_width)
# Copyright (c) 2021 elParaguayo # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import os import shutil import subprocess import libqtile.bar import libqtile.config import libqtile.confreader import libqtile.layout import pytest from dbus_next.constants import BusType from qtile_extras.widget.upower import UPowerWidget from test.helpers import Retry # noqa: I001 @Retry(ignore_exceptions=(AssertionError, )) def battery_found(manager): """Waits for widget to report batteries.""" _, output = manager.c.widget["upowerwidget"].eval("len(self.batteries)") while int(output) == 0: # If there are no batteries (shouldn't happen) try looking again. 
manager.c.widget["upowerwidget"].eval("import asyncio;asyncio.create_task(self._find_batteries())") assert False assert True @Retry(ignore_exceptions=(AssertionError, )) def text_hidden(manager, target): """Waits for widget to hide text.""" assert manager.c.widget["upowerwidget"].info()["width"] == target @pytest.fixture def powerwidget(monkeypatch): """Patch the widget to use the fake dbus service.""" monkeypatch.setattr("qtile_extras.widget.upower.UPOWER_SERVICE", "test.qtileextras.upower") monkeypatch.setattr("qtile_extras.widget.upower.UPOWER_BUS", BusType.SESSION) yield UPowerWidget @pytest.fixture(scope="function") def powerconfig(request, powerwidget): """Config for the UPower widget. Parameters set via request.""" class PowerConfig(libqtile.confreader.Config): auto_fullscreen = True keys = [ ] mouse = [] groups = [ libqtile.config.Group("a"), ] layouts = [libqtile.layout.Max()] floating_layout = libqtile.resources.default_config.floating_layout screens = [ libqtile.config.Screen( top=libqtile.bar.Bar( [ powerwidget(**getattr(request, "param", dict())) ], 50, ), ) ] yield PowerConfig upower_dbus_servive = pytest.mark.usefixtures("dbus_thread") @upower_dbus_servive def test_upower_all_batteries(manager_nospawn, powerconfig): manager_nospawn.start(powerconfig) battery_found(manager_nospawn) assert len(manager_nospawn.c.widget["upowerwidget"].info()["batteries"]) == 2 @upower_dbus_servive @pytest.mark.parametrize("powerconfig", [{"battery_name": "BAT1"}], indirect=True) def test_upower_named_battery(manager_nospawn, powerconfig): manager_nospawn.start(powerconfig) battery_found(manager_nospawn) assert len(manager_nospawn.c.widget["upowerwidget"].info()["batteries"]) == 1 assert manager_nospawn.c.widget["upowerwidget"].info()["batteries"][0]["name"] == "BAT1" assert manager_nospawn.c.widget["upowerwidget"].info()["batteries"][0]["status"] == "Normal" @upower_dbus_servive @pytest.mark.parametrize("powerconfig", [{"battery_name": "BAT1", "percentage_low": 0.6}], 
indirect=True) def test_upower_low_battery(manager_nospawn, powerconfig): manager_nospawn.start(powerconfig) battery_found(manager_nospawn) assert len(manager_nospawn.c.widget["upowerwidget"].info()["batteries"]) == 1 assert manager_nospawn.c.widget["upowerwidget"].info()["batteries"][0]["status"] == "Low" @upower_dbus_servive @pytest.mark.parametrize( "powerconfig", [{"battery_name": "BAT1", "percentage_low": 0.7, "percentage_critical": 0.55}], indirect=True ) def test_upower_critical_battery(manager_nospawn, powerconfig): manager_nospawn.start(powerconfig) battery_found(manager_nospawn) assert len(manager_nospawn.c.widget["upowerwidget"].info()["batteries"]) == 1 assert manager_nospawn.c.widget["upowerwidget"].info()["batteries"][0]["status"] == "Critical" @upower_dbus_servive @pytest.mark.parametrize("powerconfig", [{"battery_name": "BAT1"}], indirect=True) def test_upower_charging(manager_nospawn, powerconfig): manager_nospawn.start(powerconfig) battery_found(manager_nospawn) assert len(manager_nospawn.c.widget["upowerwidget"].info()["batteries"]) == 1 assert manager_nospawn.c.widget["upowerwidget"].info()["batteries"][0]["name"] == "BAT1" assert not manager_nospawn.c.widget["upowerwidget"].info()["charging"] assert manager_nospawn.c.widget["upowerwidget"].info()["batteries"][0]["tte"] == "3:23" assert not manager_nospawn.c.widget["upowerwidget"].info()["batteries"][0]["ttf"] # Trigger our method to toggle the charging state of the batteries dbussend = shutil.which("dbus-send") subprocess.run([ dbussend, f"--bus={os.environ['DBUS_SESSION_BUS_ADDRESS']}", "--type=method_call", "--dest=test.qtileextras.upower", "/org/freedesktop/UPower", "org.freedesktop.UPower.toggle_charge" ]) assert manager_nospawn.c.widget["upowerwidget"].info()["charging"] assert manager_nospawn.c.widget["upowerwidget"].info()["batteries"][0]["ttf"] == "1:03" assert not manager_nospawn.c.widget["upowerwidget"].info()["batteries"][0]["tte"] @upower_dbus_servive 
@pytest.mark.parametrize("powerconfig", [{"battery_name": "BAT1", "text_displaytime": 0.5}], indirect=True) def test_upower_show_text(manager_nospawn, powerconfig): manager_nospawn.start(powerconfig) battery_found(manager_nospawn) assert len(manager_nospawn.c.widget["upowerwidget"].info()["batteries"]) == 1 assert manager_nospawn.c.widget["upowerwidget"].info()["batteries"][0]["name"] == "BAT1" orig_width = manager_nospawn.c.widget["upowerwidget"].info()["width"] # Click on widget shows text so it should be wider now manager_nospawn.c.bar["top"].fake_button_press(0, "top", 0, 0, 1) assert manager_nospawn.c.widget["upowerwidget"].info()["width"] != orig_width # Click again to hide text so it's back to original width manager_nospawn.c.bar["top"].fake_button_press(0, "top", 0, 0, 1) assert manager_nospawn.c.widget["upowerwidget"].info()["width"] == orig_width # Check this still works when battery is charging # Trigger our method to toggle the charging state of the batteries dbussend = shutil.which("dbus-send") subprocess.run([ dbussend, f"--bus={os.environ['DBUS_SESSION_BUS_ADDRESS']}", "--type=method_call", "--dest=test.qtileextras.upower", "/org/freedesktop/UPower", "org.freedesktop.UPower.toggle_charge" ]) # Click on widget shows text so it should be wider now manager_nospawn.c.bar["top"].fake_button_press(0, "top", 0, 0, 1) assert manager_nospawn.c.widget["upowerwidget"].info()["width"] != orig_width # Let the timer hide the text text_hidden(manager_nospawn, orig_width)
import json
import os
from typing import TYPE_CHECKING

from funcy import reraise

from dvc.exceptions import DvcException
from dvc.render.base import Renderer
from dvc.render.utils import get_files
from dvc.utils import relpath

if TYPE_CHECKING:
    from dvc.types import StrPath


class ImageRenderer(Renderer):
    """Render image plots (jpg/jpeg/gif/png) by copying the raw image bytes
    to disk and emitting HTML ``<img>`` tags that reference them."""

    TYPE = "image"

    DIV = """
        <div id="{id}" style="border:1px solid black;text-align:center;
            white-space: nowrap;overflow-y:hidden;">
            {partial}
        </div>"""

    SCRIPTS = ""

    @property
    def needs_output_path(self):
        # Images are written to disk, so rendering always needs a target path.
        return True

    def _write_image(
        self,
        path: "StrPath",
        revision: str,
        filename: str,
        image_data: bytes,
    ) -> str:
        """Write ``image_data`` under ``path`` and return the written path.

        The output name is ``<revision>_<filename>`` with path separators
        flattened to underscores so nested plot paths stay in one directory.
        """
        # FIX: the replacement string must use different quotes than the
        # enclosing f-string — f"...{filename.replace(os.sep, "_")}" is a
        # SyntaxError on Python < 3.12 (quote reuse only arrived with PEP 701).
        img_path = os.path.join(
            path, f"{revision}_{filename.replace(os.sep, '_')}"
        )
        with open(img_path, "wb") as fd:
            fd.write(image_data)
        return img_path

    def _save_images(self, path: "StrPath"):
        """Yield ``(revision, filename, written_path)`` for every revision
        entry in ``self.data`` that carries raw image bytes under "data".

        Creates ``path`` lazily, only once there is something to write.
        """
        for rev, rev_data in self.data.items():
            if "data" in rev_data:
                for file, file_data in rev_data.get("data", {}).items():
                    if "data" in file_data:
                        if not os.path.isdir(path):
                            os.makedirs(path, exist_ok=True)
                        yield rev, file, self._write_image(
                            os.path.abspath(path), rev, file, file_data["data"]
                        )

    def partial_html(self, **kwargs):
        """Save images under ``<path>/static`` and return an HTML fragment
        with one captioned ``<img>`` per revision (empty string if no data).

        Raises DvcException when no ``path`` keyword is supplied.
        """
        path = kwargs.get("path", None)
        if not path:
            raise DvcException("Can't save here")

        static = os.path.join(path, "static")

        div_content = []
        for rev, _, img_path in self._save_images(static):
            div_content.append(
                """
        <div style="border:1px dotted black;margin:2px;display: inline-block;
            overflow:hidden;margin-left:8px;">
            <p>{title}</p>
            <img src="{src}">
        </div>""".format(
                    title=rev,
                    # relative to `path` so the HTML stays relocatable
                    src=(relpath(img_path, path)),
                )
            )
        if div_content:
            div_content.insert(0, f"<p>{self.filename}</p>")
            return "\n".join(div_content)
        return ""

    def as_json(self, **kwargs):
        """Save images under ``path`` and return a JSON array of
        ``{type, revisions, url}`` records, one per written image.

        Raises DvcException (chained from KeyError) when ``path`` is missing.
        """
        with reraise(
            KeyError,
            DvcException(
                f"{type(self).__name__} needs 'path' to store images."
            ),
        ):
            path = kwargs["path"]

        results = []
        for revision, _, img_path in self._save_images(path):
            results.append(
                {
                    # TYPE_KEY / REVISIONS_KEY are inherited from Renderer.
                    self.TYPE_KEY: self.TYPE,
                    self.REVISIONS_KEY: [revision],
                    "url": img_path,
                }
            )
        return json.dumps(results)

    @staticmethod
    def matches(data):
        """Return True when every file referenced by ``data`` has a
        supported raster-image extension."""
        files = get_files(data)
        extensions = {os.path.splitext(f)[1] for f in files}
        return extensions.issubset({".jpg", ".jpeg", ".gif", ".png"})
import json
import os
from typing import TYPE_CHECKING

from funcy import reraise

from dvc.exceptions import DvcException
from dvc.render.base import Renderer
from dvc.render.utils import get_files
from dvc.utils import relpath

if TYPE_CHECKING:
    from dvc.types import StrPath


class ImageRenderer(Renderer):
    """Render image plots (jpg/jpeg/gif/png) by writing the raw image bytes
    to disk and emitting HTML ``<img>`` tags that reference them."""

    TYPE = "image"

    DIV = """
        <div id="{id}" style="border:1px solid black;text-align:center;
            white-space: nowrap;overflow-y:hidden;">
            {partial}
        </div>"""

    SCRIPTS = ""

    @property
    def needs_output_path(self):
        # Images are written to disk, so rendering always needs a target path.
        return True

    def _write_image(
        self,
        path: "StrPath",
        revision: str,
        filename: str,
        image_data: bytes,
    ) -> str:
        """Write ``image_data`` under ``path`` as ``<revision>_<filename>``
        (path separators flattened to '_') and return the written path."""
        img_path = os.path.join(
            path, f"{revision}_{filename.replace(os.sep, '_')}"
        )
        with open(img_path, "wb") as fd:
            fd.write(image_data)
        return img_path

    def _save_images(self, path: "StrPath"):
        """Yield ``(revision, filename, written_path)`` for every entry in
        ``self.data`` carrying raw bytes; creates ``path`` lazily."""
        for rev, rev_data in self.data.items():
            if "data" in rev_data:
                for file, file_data in rev_data.get("data", {}).items():
                    if "data" in file_data:
                        if not os.path.isdir(path):
                            os.makedirs(path, exist_ok=True)
                        yield rev, file, self._write_image(
                            os.path.abspath(path), rev, file, file_data["data"]
                        )

    def partial_html(self, **kwargs):
        """Save images under ``<path>/static`` and return an HTML fragment
        with one captioned ``<img>`` per revision ('' when no data).

        Raises DvcException when no ``path`` keyword is supplied.
        """
        path = kwargs.get("path", None)
        if not path:
            raise DvcException("Can't save here")

        static = os.path.join(path, "static")

        div_content = []
        for rev, _, img_path in self._save_images(static):
            div_content.append(
                """
        <div style="border:1px dotted black;margin:2px;display: inline-block;
            overflow:hidden;margin-left:8px;">
            <p>{title}</p>
            <img src="{src}">
        </div>""".format(
                    # src is relative to `path` so the HTML stays relocatable
                    title=rev, src=(relpath(img_path, path))
                )
            )
        if div_content:
            div_content.insert(0, f"<p>{self.filename}</p>")
            return "\n".join(div_content)
        return ""

    def as_json(self, **kwargs):
        """Save images under ``path`` and return a JSON array of
        ``{type, revisions, url}`` records, one per written image.

        Raises DvcException (chained from KeyError) when ``path`` is missing.
        """
        with reraise(
            KeyError,
            DvcException(
                f"{type(self).__name__} needs 'path' to store images."
            ),
        ):
            path = kwargs["path"]

        results = []
        for revision, _, img_path in self._save_images(path):
            results.append(
                {
                    # TYPE_KEY / REVISIONS_KEY are inherited from Renderer.
                    self.TYPE_KEY: self.TYPE,
                    self.REVISIONS_KEY: [revision],
                    "url": img_path,
                }
            )
        return json.dumps(results)

    @staticmethod
    def matches(data):
        """Return True when every file referenced by ``data`` has a
        supported raster-image extension."""
        files = get_files(data)
        extensions = set(map(lambda f: os.path.splitext(f)[1], files))
        return extensions.issubset({".jpg", ".jpeg", ".gif", ".png"})
#
# Copyright 2018-2021 Elyra Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from abc import abstractmethod
import json
import os
from typing import List


class NodeFile(object):
    """Base class for input and output node files"""

    def __init__(self, filename: str) -> None:
        self.filename = filename


class InputNodeFile(NodeFile):
    """Given a filename, it ensures the file exists and can read its contents."""

    def __init__(self, filename: str) -> None:
        super().__init__(filename)
        # NOTE(review): this instance attribute shadows the `data` method
        # below, so `inst.data` is the cached contents (or None) and
        # `inst.data()` raises TypeError. Preserved as-is to avoid changing
        # observable behavior — confirm intent with the original authors.
        self.data = None
        if not os.path.exists(self.filename):
            raise FileNotFoundError("File '{}' does not exist!".format(self.filename))

    def read(self) -> str:
        """Read the file, cache its contents on ``self.data`` and return them."""
        with open(self.filename) as f:
            self.data = f.read()
        return self.data

    def data(self) -> str:
        # Unreachable accessor — shadowed by the instance attribute (see above).
        return self.data


class OutputNodeFile(NodeFile):
    """Given a filename, it ensures the file does not exist and will write
    data to that file."""

    def __init__(self, filename: str) -> None:
        super().__init__(filename)
        # Don't enforce output file existence here - break idempotency
        # if os.path.exists(self.filename):
        #     raise FileExistsError("File '{}' already exists!".format(self.filename))

    def write(self, data) -> None:
        """Write (or overwrite) ``data`` to the output file."""
        with open(self.filename, 'w+') as f:
            f.write(data)


class ExecutionNode(ABC):
    """Represents an executable node of a pipeline.

    This class must be subclassed."""
    node_name = None   # basename of the node file, without extension
    filename = None    # value of the NODE_FILENAME environment variable
    extension = None   # file extension, validated against expected_extension()

    def __init__(self) -> None:
        self.filename = os.getenv('NODE_FILENAME')
        if not self.filename:
            raise ValueError("NODE_FILENAME environment variable must be set!")
        node_file_splits = os.path.basename(self.filename).split(".")
        self.node_name = node_file_splits[0]
        self.extension = node_file_splits[1]
        self.validate()

    def validate(self) -> None:
        """Validate the filename as best as possible, depending on subclass."""
        # Validate its extension and that the file exists.
        self.validate_extension()
        if not os.path.exists(self.filename):
            raise FileNotFoundError("ExecutionNode filename '{}' does not exist!".format(self.filename))

    def run(self) -> None:
        """Consume inputs, run the (emulated) experiment, produce outputs."""
        self.process_inputs("INPUT_FILENAMES")
        self.perform_experiment()
        self.process_outputs("OUTPUT_FILENAMES")

    def perform_experiment(self) -> None:
        """Emulates the experiment to run.

        Prints and asserts the Elyra runtime environment variables so a test
        harness can verify they were propagated to the node.
        """
        print(f"NODE_NAME: {self.node_name}")

        runtime_env = os.getenv("ELYRA_RUNTIME_ENV")
        assert runtime_env == "local", "ELYRA_RUNTIME_ENV has not been set to 'local'!"
        print(f"ELYRA_RUNTIME_ENV: {runtime_env}")

        run_name = os.getenv("ELYRA_RUN_NAME")
        assert run_name is not None, "ELYRA_RUN_NAME is not set!"
        print(f"ELYRA_RUN_NAME: {run_name}")

        pipeline_name = os.getenv("PIPELINE_NAME")
        print(f"PIPELINE_NAME: {pipeline_name}")
        assert pipeline_name is not None, "PIPELINE_NAME is not set!"
        assert run_name.startswith(pipeline_name), "ELYRA_RUN_NAME does not start with pipeline name!"

    def process_inputs(self, env_var: str) -> List[InputNodeFile]:
        """Given an environment variable `env_var`, that contains a
        SEMI-COLON-separated list of filenames, it processes each entry by
        instantiating an instance of InputNodeFile corresponding to each entry
        and returns the list of instances.
        """
        inputs = []
        filenames = os.getenv(env_var, "").split(';')
        for filename in filenames:
            if filename:
                inputs.append(InputNodeFile(filename))
        # Each upstream node wrote a JSON payload; verify it belongs to this run.
        for input_file in inputs:
            payload = json.loads(input_file.read())
            # FIX: the dict key must use different quotes than the enclosing
            # f-string — f"FROM: {payload.get("node")}" is a SyntaxError on
            # Python < 3.12 (quote reuse only arrived with PEP 701).
            print(f"FROM: {payload.get('node')}")
            assert payload.get('run_name') == os.getenv("ELYRA_RUN_NAME")
        return inputs

    def process_outputs(self, env_var: str) -> List[OutputNodeFile]:
        """Given an environment variable `env_var`, that contains a
        SEMI-COLON-separated list of filenames, it processes each entry by
        instantiating an instance of OutputNodeFile corresponding to each entry
        and returns the list of instances.
        """
        outputs = []
        filenames = os.getenv(env_var, "").split(';')
        for filename in filenames:
            if filename:
                outputs.append(OutputNodeFile(filename))
        # Include ELYRA_RUN_NAME in all outputs - which are verified when used as inputs
        payload = {"node": self.node_name, "run_name": os.getenv("ELYRA_RUN_NAME")}
        for output_file in outputs:
            output_file.write(json.dumps(payload))
        return outputs

    @abstractmethod
    def expected_extension(self) -> str:
        raise NotImplementedError("Method 'expected_extension()' must be implemented by subclass '{}'!".
                                  format(self.__class__.__name__))

    def validate_extension(self) -> None:
        """Raise ValueError when the node file's extension is not the expected one."""
        if self.expected_extension() != self.extension:
            raise ValueError("Filename '{}' does not have a proper extension: '{}'".
                             format(self.filename, self.expected_extension()))


class NotebookNode(ExecutionNode):
    """Represents a Notebook execution node of a pipeline."""

    def expected_extension(self) -> str:
        return "ipynb"

    def validate(self) -> None:
        """For notebooks, we can also ensure the file can be loaded as JSON."""
        super().validate()
        # Confirm file can be loaded as JSON
        with open(self.filename) as f:
            json.load(f)


class PythonNode(ExecutionNode):
    """Represents a Python file execution node of a pipeline."""

    def expected_extension(self) -> str:
        return "py"
#
# Copyright 2018-2021 Elyra Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
from abc import abstractmethod
import json
import os
from typing import List


class NodeFile(object):
    """Base class for input and output node files"""

    def __init__(self, filename: str) -> None:
        self.filename = filename


class InputNodeFile(NodeFile):
    """Given a filename, it ensures the file exists and can read its contents."""

    def __init__(self, filename: str) -> None:
        super().__init__(filename)
        # NOTE(review): this instance attribute shadows the `data` method
        # below, so `inst.data` is the cached contents (or None) and
        # `inst.data()` raises TypeError — confirm intent with the authors.
        self.data = None
        if not os.path.exists(self.filename):
            raise FileNotFoundError("File '{}' does not exist!".format(self.filename))

    def read(self) -> str:
        # Read the file, cache its contents on self.data and return them.
        with open(self.filename) as f:
            self.data = f.read()
        return self.data

    def data(self) -> str:
        # Unreachable accessor — shadowed by the instance attribute (see above).
        return self.data


class OutputNodeFile(NodeFile):
    """Given a filename, it ensures the file does not exist and will write
    data to that file."""

    def __init__(self, filename: str) -> None:
        super().__init__(filename)
        # Don't enforce output file existence here - break idempotency
        # if os.path.exists(self.filename):
        #     raise FileExistsError("File '{}' already exists!".format(self.filename))

    def write(self, data) -> None:
        # Write (or overwrite) `data` to the output file.
        with open(self.filename, 'w+') as f:
            f.write(data)


class ExecutionNode(ABC):
    """Represents an executable node of a pipeline.

    This class must be subclassed."""
    node_name = None   # basename of the node file, without extension
    filename = None    # value of the NODE_FILENAME environment variable
    extension = None   # file extension, validated against expected_extension()

    def __init__(self) -> None:
        self.filename = os.getenv('NODE_FILENAME')
        if not self.filename:
            raise ValueError("NODE_FILENAME environment variable must be set!")
        node_file_splits = os.path.basename(self.filename).split(".")
        self.node_name = node_file_splits[0]
        self.extension = node_file_splits[1]
        self.validate()

    def validate(self) -> None:
        """Validate the filename as best as possible, depending on subclass."""
        # Validate its extension and that the file exists.
        self.validate_extension()
        if not os.path.exists(self.filename):
            raise FileNotFoundError("ExecutionNode filename '{}' does not exist!".format(self.filename))

    def run(self) -> None:
        # Consume inputs, run the (emulated) experiment, produce outputs.
        self.process_inputs("INPUT_FILENAMES")
        self.perform_experiment()
        self.process_outputs("OUTPUT_FILENAMES")

    def perform_experiment(self) -> None:
        """Emulates the experiment to run.

        Prints and asserts the Elyra runtime environment variables so a test
        harness can verify they were propagated to the node.
        """
        print(f"NODE_NAME: {self.node_name}")

        runtime_env = os.getenv("ELYRA_RUNTIME_ENV")
        assert runtime_env == "local", "ELYRA_RUNTIME_ENV has not been set to 'local'!"
        print(f"ELYRA_RUNTIME_ENV: {runtime_env}")

        run_name = os.getenv("ELYRA_RUN_NAME")
        assert run_name is not None, "ELYRA_RUN_NAME is not set!"
        print(f"ELYRA_RUN_NAME: {run_name}")

        pipeline_name = os.getenv("PIPELINE_NAME")
        print(f"PIPELINE_NAME: {pipeline_name}")
        assert pipeline_name is not None, "PIPELINE_NAME is not set!"
        assert run_name.startswith(pipeline_name), "ELYRA_RUN_NAME does not start with pipeline name!"

    def process_inputs(self, env_var: str) -> List[InputNodeFile]:
        """Given an environment variable `env_var`, that contains a
        SEMI-COLON-separated list of filenames, it processes each entry by
        instantiating an instance of InputNodeFile corresponding to each entry
        and returns the list of instances.
        """
        inputs = []
        filenames = os.getenv(env_var, "").split(';')
        for filename in filenames:
            if filename:
                inputs.append(InputNodeFile(filename))
        # Each upstream node wrote a JSON payload; verify it belongs to this run.
        for input_file in inputs:
            payload = json.loads(input_file.read())
            print(f"FROM: {payload.get('node')}")
            assert payload.get('run_name') == os.getenv("ELYRA_RUN_NAME")
        return inputs

    def process_outputs(self, env_var: str) -> List[OutputNodeFile]:
        """Given an environment variable `env_var`, that contains a
        SEMI-COLON-separated list of filenames, it processes each entry by
        instantiating an instance of OutputNodeFile corresponding to each entry
        and returns the list of instances.
        """
        outputs = []
        filenames = os.getenv(env_var, "").split(';')
        for filename in filenames:
            if filename:
                outputs.append(OutputNodeFile(filename))
        # Include ELYRA_RUN_NAME in all outputs - which are verified when used as inputs
        payload = {"node": self.node_name, "run_name": os.getenv("ELYRA_RUN_NAME")}
        for output_file in outputs:
            output_file.write(json.dumps(payload))
        return outputs

    @abstractmethod
    def expected_extension(self) -> str:
        raise NotImplementedError("Method 'expected_extension()' must be implemented by subclass '{}'!".
                                  format(self.__class__.__name__))

    def validate_extension(self) -> None:
        # Raise ValueError when the node file's extension is not the expected one.
        if self.expected_extension() != self.extension:
            raise ValueError("Filename '{}' does not have a proper extension: '{}'".
                             format(self.filename, self.expected_extension()))


class NotebookNode(ExecutionNode):
    """Represents a Notebook execution node of a pipeline."""

    def expected_extension(self) -> str:
        return "ipynb"

    def validate(self) -> None:
        """For notebooks, we can also ensure the file can be loaded as JSON."""
        super().validate()
        # Confirm file can be loaded as JSON
        with open(self.filename) as f:
            json.load(f)


class PythonNode(ExecutionNode):
    """Represents a Python file execution node of a pipeline."""

    def expected_extension(self) -> str:
        return "py"
try:
    import importlib.resources as resources
except ModuleNotFoundError:
    import importlib_resources as resources
from itertools import takewhile
import os
from pathlib import Path
import shutil
import subprocess as sp
import sys
from typing import Dict, Iterable, Optional, Tuple

# Chimera helper scripts that ship inside this package.
with resources.path('pyscreener.docking.dock_utils', '.') as p_module:
    PREP_REC = p_module / 'scripts' / 'prep_rec.py'
    WRITE_DMS = p_module / 'scripts' / 'write_dms.py'

# DOCK6 must point at a local DOCK6 installation (KeyError at import otherwise).
DOCK6 = Path(os.environ['DOCK6'])
DOCK6_BIN = DOCK6 / 'bin'
DOCK6_PARAMS = DOCK6 / 'parameters'

DMS = DOCK6_BIN / 'dms'
SPHGEN = DOCK6_BIN / 'sphgen_cpp'
SPHERE_SELECTOR = DOCK6_BIN / 'sphere_selector'
SHOWBOX = DOCK6_BIN / 'showbox'
GRID = DOCK6_BIN / 'grid'
VDW_DEFN_FILE = DOCK6_PARAMS / 'vdw_AMBER_parm99.defn'


def prepare_from_smi(smi: str, name: str = 'ligand',
                     path: str = '.', **kwargs) -> Optional[Tuple]:
    """Prepare an input ligand file from the ligand's SMILES string

    Parameters
    ----------
    smi : str
        the SMILES string of the ligand
    name : Optional[str] (Default = None)
        the name of the ligand.
    path : str (Default = '.')
        the path under which the output PDBQT file should be written
    **kwargs
        additional and unused keyword arguments

    Returns
    -------
    Optional[Tuple]
        a tuple of the SMILES string and the corresponding prepared input file.
        None if preparation failed for any reason
    """
    path = Path(path)
    if not path.is_dir():
        path.mkdir()

    mol2 = str(path / f'{name}.mol2')

    # obabel: add hydrogens, generate 3D coords, assign Gasteiger charges
    argv = ['obabel', f'-:{smi}', '-omol2', '-O', mol2,
            '-h', '--gen3d', '--partialcharge', 'gasteiger']
    ret = sp.run(argv, check=False, stderr=sp.PIPE)

    try:
        ret.check_returncode()
        return smi, mol2
    except sp.SubprocessError:
        return None


def prepare_from_file(filename: str, use_3d: bool = False,
                      name: Optional[str] = None, path: str = '.',
                      **kwargs) -> Optional[Tuple]:
    """Convert a single ligand to the appropriate input format

    Parameters
    ----------
    filename : str
        the name of the file containing the ligand
    use_3d : bool (Default = False)
        whether to use the 3D information in the input file (if possible)
    prepare_from_smi: Callable[..., Tuple[str, str]]
        a function that prepares an input ligand file from a SMILES string
    name : Optional[str] (Default = None)
        the name of the ligand. If None, use the stem of the input file
    path : str (Default = '.')
        the path under which the output .pdbqt file should be written
    **kwargs
        additional and unused keyword arguments

    Returns
    -------
    Optional[List[Tuple]]
        a tuple of the SMILES string the prepared input file corresponding
        to the molecule contained in filename
    """
    name = name or Path(filename).stem

    ret = sp.run(['obabel', filename, '-osmi'], stdout=sp.PIPE, check=True)
    lines = ret.stdout.decode('utf-8').splitlines()
    smis = [line.split()[0] for line in lines]

    if not use_3d:
        # Rebuild 3D structures from the SMILES strings instead
        ligands = [prepare_from_smi(smi, f'{name}_{i}', path)
                   for i, smi in enumerate(smis)]
        return [lig for lig in ligands if lig]

    path = Path(path)
    if not path.is_dir():
        path.mkdir()

    # -m writes one numbered output file per molecule: {name}_1.mol2, ...
    mol2 = f'{path}/{name}_.mol2'
    argv = ['obabel', filename, '-omol2', '-O', mol2,
            '-m', '-h', '--partialcharge', 'gasteiger']

    ret = sp.run(argv, check=False, stderr=sp.PIPE)
    try:
        ret.check_returncode()
    except sp.SubprocessError:
        return None

    # obabel reports "<N> molecules converted" on stderr
    stderr = ret.stderr.decode('utf-8')
    for line in stderr.splitlines():
        if 'converted' not in line:
            continue
        n_mols = int(line.split()[0])

    # FIX: obabel -m numbers its outputs 1..n_mols inclusive; the original
    # range(1, n_mols) dropped the last molecule.
    mol2s = [f'{path}/{name}_{i}.mol2' for i in range(1, n_mols + 1)]

    return list(zip(smis, mol2s))


def prepare_receptor(receptor: str, probe_radius: float = 1.4,
                     steric_clash_dist: float = 0.0,
                     min_radius: float = 1.4, max_radius: float = 4.0,
                     center: Optional[Tuple[float, float, float]] = None,
                     size: Tuple[float, float, float] = (20., 20., 20.),
                     docked_ligand_file: Optional[str] = None,
                     use_largest: bool = False, buffer: float = 10.,
                     enclose_spheres: bool = True,
                     path: str = '.') -> Optional[Tuple[str, str]]:
    """Prepare the DOCK input files corresponding to the given receptor

    Pipeline: mol2 + pdb conversion -> molecular surface (dms) ->
    spheres (sphgen) -> sphere selection -> docking box -> scoring grid.

    Parameters
    ----------
    receptor : str
        the filepath of a file containing a receptor. Must be in a file
        that is readable by Chimera
    center : Tuple[float, float, float]
        the x-, y-, and z-coordinates of the center of the docking box
    size : Tuple[float, float, float] (Default = (20, 20, 20))
        the x-, y-, and z-radii of the docking box
    docked_ligand_file : Optional[str] (Default = None)
        the filepath of a file containing the coordinates of a docked ligand
    use_largest : bool (Default = False)
        whether to use the largest cluster of spheres when selecting spheres
    buffer : float (Default = 10.)
        the amount of buffer space to be added around the docked ligand when
        selecting spheres and when constructing the docking box if
        enclose_spheres is True
    enclose_spheres : bool (Default = True)
        whether to calculate the docking box by enclosing the selected spheres
        or to use an input center and radii

    Returns
    -------
    sph_grid : Optional[Tuple[str, str]]
        A tuple of strings with the first entry being the filepath of the file
        containing the selected spheres and the second being entry the prefix
        of all prepared grid files.
        None if receptor preparation fails at any point
    """
    rec_mol2 = prepare_mol2(receptor, path)
    rec_pdb = prepare_pdb(receptor, path)
    if rec_mol2 is None or rec_pdb is None:
        return None

    rec_dms = prepare_dms(rec_pdb, probe_radius, path)
    if rec_dms is None:
        return None

    rec_sph = prepare_sph(rec_dms, steric_clash_dist,
                          min_radius, max_radius, path)
    if rec_sph is None:
        return None

    rec_sph = select_spheres(
        rec_sph, center, size, docked_ligand_file, use_largest, buffer, path
    )

    rec_box = prepare_box(rec_sph, center, size, enclose_spheres, buffer, path)
    if rec_box is None:
        return None

    grid_prefix = prepare_grid(rec_mol2, rec_box, path)
    if grid_prefix is None:
        return None

    return rec_sph, grid_prefix


def prepare_mol2(receptor: str, path: str = '.') -> Optional[str]:
    """Prepare a receptor mol2 file from its input file

    Parameter
    ---------
    receptor : str
        the filename of a file containing the receptor

    Returns
    -------
    receptor_mol2 : Optional[str]
        the filename of the prepared mol2 file
    """
    p_rec = Path(receptor)
    p_rec_mol2 = Path(path) / f'{p_rec.stem}_withH.mol2'
    # (p_rec.with_name(f'{p_rec.stem}_withH.mol2'))
    args = ['chimera', '--nogui', '--script',
            f'{PREP_REC} {receptor} {p_rec_mol2}']
    ret = sp.run(args, stdout=sp.PIPE, stderr=sp.PIPE)

    try:
        ret.check_returncode()
    except sp.SubprocessError:
        print(f'ERROR: failed to convert receptor: "{receptor}"')
        if ret.stderr:
            # FIX: decode("utf-8") must use different quotes than the
            # enclosing f-string — quote reuse is a SyntaxError pre-3.12.
            print(f'Message: {ret.stderr.decode("utf-8")}', file=sys.stderr)
        return None

    return str(p_rec_mol2)


def prepare_pdb(receptor: str, path: str = '.') -> Optional[str]:
    """Prepare a receptor PDB file for usage in DOCK runs

    Parameter
    ---------
    receptor : str
        the filename of a file containing the receptor

    Returns
    -------
    receptor_mol2 : Optional[str]
        the filename of the prepared pdb file
    """
    p_rec = Path(receptor)
    rec_pdb = str(Path(path) / f'DOCK_{p_rec.stem}.pdb')
    # rec_pdb = str(p_rec.with_name(f'DOCK_{p_rec.stem}.pdb'))
    args = ['obabel', receptor, '-opdb', '-O', rec_pdb]
    ret = sp.run(args, stderr=sp.PIPE)

    try:
        ret.check_returncode()
    except sp.SubprocessError:
        print(f'ERROR: failed to convert receptor: "{receptor}"')
        if ret.stderr:
            print(f'Message: {ret.stderr.decode("utf-8")}', file=sys.stderr)
        return None

    return rec_pdb


def prepare_dms(rec_pdb: str, probe_radius: float = 1.4,
                path: str = '.') -> Optional[str]:
    """Generate a molecular surface (.dms) for the receptor via Chimera."""
    # p_rec_pdb = Path(rec_pdb)
    # rec_dms = str(Path(rec_pdb).with_suffix('.dms'))
    p_rec_dms = Path(path) / f'{Path(rec_pdb).stem}.dms'
    argv = ['chimera', '--nogui', '--script',
            f'{WRITE_DMS} {rec_pdb} {probe_radius} {str(p_rec_dms)}']
    ret = sp.run(argv, stdout=sp.PIPE)

    try:
        ret.check_returncode()
    except sp.SubprocessError:
        print(f'ERROR: failed to generate surface from "{rec_pdb}"',
              file=sys.stderr)
        if ret.stderr:
            print(f'Message: {ret.stderr.decode("utf-8")}', file=sys.stderr)
        # NOTE: deliberately falls through and still returns the path
        # (original had `return None` commented out) — confirm intent.
        # return None

    return str(p_rec_dms)


def prepare_sph(rec_dms: str, steric_clash_dist: float = 0.0,
                min_radius: float = 1.4, max_radius: float = 4.0,
                path: str = '.') -> Optional[str]:
    """Generate receptor spheres (.sph) from a surface file via sphgen_cpp."""
    # sph_file = str(Path(rec_dms).with_suffix('.sph'))
    sph_file = str(Path(path) / f'{Path(rec_dms).stem}.sph')
    # FIX: the '-d' and '-m' options were missing their dashes ('d', 'm'),
    # which sphgen_cpp would not parse as flags (cf. -i/-o/-s/-l/-x).
    argv = [SPHGEN, '-i', rec_dms, '-o', sph_file,
            '-s', 'R', '-d', 'X', '-l', str(steric_clash_dist),
            '-m', str(min_radius), '-x', str(max_radius)]
    ret = sp.run(argv, stdout=sp.PIPE)

    try:
        ret.check_returncode()
    except sp.SubprocessError:
        print(f'ERROR: failed to generate spheres for "{rec_dms}"',
              file=sys.stderr)
        if ret.stderr:
            print(f'Message: {ret.stderr.decode("utf-8")}', file=sys.stderr)
        return None

    return sph_file


def select_spheres(sph_file: str, center: Tuple[float, float, float],
                   size: Tuple[float, float, float],
                   docked_ligand_file: Optional[str] = None,
                   use_largest: bool = False, buffer: float = 10.0,
                   path: str = '.') -> Optional[str]:
    """Select the subset of spheres to dock against.

    Preference order: spheres near a docked ligand (sphere_selector), else
    the largest cluster, else all spheres inside the center/size box.
    """
    p_sph = Path(sph_file)
    selected_sph = str(Path(path) / f'{p_sph.stem}_selected_spheres.sph')
    # selected_sph = str(p_sph.parent / f'{p_sph.stem}_selected{p_sph.suffix}')

    if docked_ligand_file:
        # FIX: subprocess arguments must be strings — `buffer` is a float.
        argv = [SPHERE_SELECTOR, sph_file, docked_ligand_file, str(buffer)]
        sp.run(argv, check=True)
        # sphere_selector always outputs this filename
        shutil.move('selected_spheres.sph', selected_sph)
        return selected_sph

    def inside_docking_box(line):
        """Are the coordinates contained in the line inside the docking box?"""
        # NOTE(review): relies on `center`/`size` from the enclosing scope;
        # raises TypeError if this branch is reached with center=None.
        try:
            tokens = line.split()
            xyz = list(map(float, tokens[1:4]))
        except ValueError:
            return False
        for i, coord in enumerate(xyz):
            if not center[i]-size[i] <= coord <= center[i]+size[i]:
                return False
        return True

    with open(sph_file, 'r') as fin, open(selected_sph, 'w') as fout:
        if use_largest:
            fout.write(f'DOCK spheres largest cluster\n')
            # skip everything up to the first cluster header, then take
            # the first cluster's lines
            for line in takewhile(lambda line: 'cluster' not in line, fin):
                continue
            lines = list(takewhile(lambda line: 'cluster' not in line, fin))
        else:
            fout.write(f'DOCK spheres within radii {size} of {center}\n')
            lines = [line for line in fin if inside_docking_box(line)]
        fout.write(f'cluster 1 number of spheres in cluster {len(lines)}\n')
        fout.writelines(lines)

    return selected_sph


def prepare_box(sph_file: str, center: Tuple[float, float, float],
                size: Tuple[float, float, float],
                enclose_spheres: bool = True, buffer: float = 10.0,
                path: str = '.') -> Optional[str]:
    """Construct the docking box (.pdb) around the selected spheres via showbox."""
    p_sph = Path(sph_file)
    shutil.copyfile(sph_file, 'tmp_spheres.sph')
    # p_box = p_sph.with_name(f'{p_sph.stem}_box.pdb')
    box_file = str(Path(path) / f'{p_sph.stem}_box.pdb')
    # box_file = str(p_box)

    # showbox is driven interactively over stdin
    if enclose_spheres:
        showbox_input = f'Y\n{buffer}\ntmp_spheres.sph\n1\n'
    else:
        x, y, z = center
        r_x, r_y, r_z = size
        showbox_input = f'N\nU\n{x} {y} {z}\n{r_x} {r_y} {r_z}\n'
    showbox_input += 'tmp_box.pdb\n'

    # with open('box.in', 'w') as fid:
    #     if enclose_spheres:
    #         fid.write('Y\n')
    #         fid.write(f'{buffer}\n')
    #         fid.write(f'{sph_file}\n')
    #         fid.write(f'1\n')
    #     else:
    #         fid.write('N\n')
    #         fid.write('U\n')
    #         fid.write(f'[{center[0]} {center[1]} {center[2]}]\n')
    #         fid.write(f'[{size[0]} {size[1]} {size[2]}]\n')
    #     fid.write(f'{box_file}\n')

    ret = sp.run([SHOWBOX], input=showbox_input,
                 universal_newlines=True, stdout=sp.PIPE)
    try:
        ret.check_returncode()
    except sp.SubprocessError:
        print(f'ERROR: failed to generate box corresponding to "{sph_file}"',
              file=sys.stderr)
        if ret.stderr:
            print(f'Message: {ret.stderr.decode("utf-8")}', file=sys.stderr)
        return None

    os.unlink('tmp_spheres.sph')
    shutil.move('tmp_box.pdb', box_file)

    return box_file


def prepare_grid(rec_mol2: str, box_file: str,
                 path: str = '.') -> Optional[str]:
    """Precompute the DOCK energy scoring grid for the receptor/box pair."""
    p_rec = Path(rec_mol2)
    p_grid_prefix = Path(path) / f'{p_rec.stem}_grid'
    shutil.copy(box_file, 'tmp_box.pdb')

    with open('grid.in', 'w') as fid:
        fid.write('compute_grids yes\n')
        fid.write('grid_spacing 0.4\n')
        fid.write('output_molecule no\n')
        fid.write('contact_score no\n')
        fid.write('energy_score yes\n')
        fid.write('energy_cutoff_distance 9999\n')
        fid.write('atom_model a\n')
        fid.write('attractive_exponent 6\n')
        fid.write('repulsive_exponent 12\n')
        fid.write('distance_dielectric yes\n')
        fid.write('dielectric_factor 4.0\n')
        fid.write('bump_filter yes\n')
        fid.write('bump_overlap 0.75\n')
        fid.write(f'receptor_file {rec_mol2}\n')
        fid.write('box_file tmp_box.pdb\n')
        fid.write(f'vdw_definition_file {VDW_DEFN_FILE}\n')
        fid.write(f'score_grid_prefix {p_grid_prefix}\n')

    ret = sp.run([GRID, '-i', 'grid.in', '-o', 'gridinfo.out'], stdout=sp.PIPE)

    try:
        ret.check_returncode()
    except sp.SubprocessError:
        print(f'ERROR: failed to generate grid from {rec_mol2}',
              file=sys.stderr)
        if ret.stderr:
            print(f'Message: {ret.stderr.decode("utf-8")}', file=sys.stderr)
        return None

    os.unlink('tmp_box.pdb')

    return str(p_grid_prefix)
try:
    import importlib.resources as resources
except ModuleNotFoundError:
    import importlib_resources as resources
from itertools import takewhile
import os
from pathlib import Path
import shutil
import subprocess as sp
import sys
from typing import Dict, Iterable, Optional, Tuple

# Chimera helper scripts that ship inside this package.
with resources.path('pyscreener.docking.dock_utils', '.') as p_module:
    PREP_REC = p_module / 'scripts' / 'prep_rec.py'
    WRITE_DMS = p_module / 'scripts' / 'write_dms.py'

# DOCK6 must point at a local DOCK6 installation (KeyError at import otherwise).
DOCK6 = Path(os.environ['DOCK6'])
DOCK6_BIN = DOCK6 / 'bin'
DOCK6_PARAMS = DOCK6 / 'parameters'

DMS = DOCK6_BIN / 'dms'
SPHGEN = DOCK6_BIN / 'sphgen_cpp'
SPHERE_SELECTOR = DOCK6_BIN / 'sphere_selector'
SHOWBOX = DOCK6_BIN / 'showbox'
GRID = DOCK6_BIN / 'grid'
VDW_DEFN_FILE = DOCK6_PARAMS / 'vdw_AMBER_parm99.defn'


def prepare_from_smi(smi: str, name: str = 'ligand',
                     path: str = '.', **kwargs) -> Optional[Tuple]:
    """Prepare an input ligand file from the ligand's SMILES string

    Parameters
    ----------
    smi : str
        the SMILES string of the ligand
    name : Optional[str] (Default = None)
        the name of the ligand.
    path : str (Default = '.')
        the path under which the output PDBQT file should be written
    **kwargs
        additional and unused keyword arguments

    Returns
    -------
    Optional[Tuple]
        a tuple of the SMILES string and the corresponding prepared input file.
        None if preparation failed for any reason
    """
    path = Path(path)
    if not path.is_dir():
        path.mkdir()

    mol2 = str(path / f'{name}.mol2')

    # obabel: add hydrogens, generate 3D coords, assign Gasteiger charges
    argv = ['obabel', f'-:{smi}', '-omol2', '-O', mol2,
            '-h', '--gen3d', '--partialcharge', 'gasteiger']
    ret = sp.run(argv, check=False, stderr=sp.PIPE)

    try:
        ret.check_returncode()
        return smi, mol2
    except sp.SubprocessError:
        return None


def prepare_from_file(filename: str, use_3d: bool = False,
                      name: Optional[str] = None, path: str = '.',
                      **kwargs) -> Optional[Tuple]:
    """Convert a single ligand to the appropriate input format

    Parameters
    ----------
    filename : str
        the name of the file containing the ligand
    use_3d : bool (Default = False)
        whether to use the 3D information in the input file (if possible)
    prepare_from_smi: Callable[..., Tuple[str, str]]
        a function that prepares an input ligand file from a SMILES string
    name : Optional[str] (Default = None)
        the name of the ligand. If None, use the stem of the input file
    path : str (Default = '.')
        the path under which the output .pdbqt file should be written
    **kwargs
        additional and unused keyword arguments

    Returns
    -------
    Optional[List[Tuple]]
        a tuple of the SMILES string the prepared input file corresponding
        to the molecule contained in filename
    """
    name = name or Path(filename).stem

    ret = sp.run(['obabel', filename, '-osmi'], stdout=sp.PIPE, check=True)
    lines = ret.stdout.decode('utf-8').splitlines()
    smis = [line.split()[0] for line in lines]

    if not use_3d:
        # rebuild 3D structures from the SMILES strings instead
        ligands = [prepare_from_smi(smi, f'{name}_{i}', path)
                   for i, smi in enumerate(smis)]
        return [lig for lig in ligands if lig]

    path = Path(path)
    if not path.is_dir():
        path.mkdir()

    # -m writes one numbered output file per molecule: {name}_1.mol2, ...
    mol2 = f'{path}/{name}_.mol2'
    argv = ['obabel', filename, '-omol2', '-O', mol2,
            '-m', '-h', '--partialcharge', 'gasteiger']

    ret = sp.run(argv, check=False, stderr=sp.PIPE)
    try:
        ret.check_returncode()
    except sp.SubprocessError:
        return None

    # obabel reports "<N> molecules converted" on stderr
    stderr = ret.stderr.decode('utf-8')
    for line in stderr.splitlines():
        if 'converted' not in line:
            continue
        n_mols = int(line.split()[0])

    # NOTE(review): obabel -m appears to number outputs 1..n_mols inclusive,
    # so range(1, n_mols) would drop the last molecule — TODO confirm.
    mol2s = [f'{path}/{name}_{i}.mol2' for i in range(1, n_mols)]

    return list(zip(smis, mol2s))


def prepare_receptor(receptor: str, probe_radius: float = 1.4,
                     steric_clash_dist: float = 0.0,
                     min_radius: float = 1.4, max_radius: float = 4.0,
                     center: Optional[Tuple[float, float, float]] = None,
                     size: Tuple[float, float, float] = (20., 20., 20.),
                     docked_ligand_file: Optional[str] = None,
                     use_largest: bool = False, buffer: float = 10.,
                     enclose_spheres: bool = True,
                     path: str = '.') -> Optional[Tuple[str, str]]:
    """Prepare the DOCK input files corresponding to the given receptor

    Pipeline: mol2 + pdb conversion -> molecular surface (dms) ->
    spheres (sphgen) -> sphere selection -> docking box -> scoring grid.

    Parameters
    ----------
    receptor : str
        the filepath of a file containing a receptor. Must be in a file
        that is readable by Chimera
    center : Tuple[float, float, float]
        the x-, y-, and z-coordinates of the center of the docking box
    size : Tuple[float, float, float] (Default = (20, 20, 20))
        the x-, y-, and z-radii of the docking box
    docked_ligand_file : Optional[str] (Default = None)
        the filepath of a file containing the coordinates of a docked ligand
    use_largest : bool (Default = False)
        whether to use the largest cluster of spheres when selecting spheres
    buffer : float (Default = 10.)
        the amount of buffer space to be added around the docked ligand when
        selecting spheres and when constructing the docking box if
        enclose_spheres is True
    enclose_spheres : bool (Default = True)
        whether to calculate the docking box by enclosing the selected spheres
        or to use an input center and radii

    Returns
    -------
    sph_grid : Optional[Tuple[str, str]]
        A tuple of strings with the first entry being the filepath of the file
        containing the selected spheres and the second being entry the prefix
        of all prepared grid files.
        None if receptor preparation fails at any point
    """
    rec_mol2 = prepare_mol2(receptor, path)
    rec_pdb = prepare_pdb(receptor, path)
    if rec_mol2 is None or rec_pdb is None:
        return None

    rec_dms = prepare_dms(rec_pdb, probe_radius, path)
    if rec_dms is None:
        return None

    rec_sph = prepare_sph(rec_dms, steric_clash_dist,
                          min_radius, max_radius, path)
    if rec_sph is None:
        return None

    rec_sph = select_spheres(
        rec_sph, center, size, docked_ligand_file, use_largest, buffer, path
    )

    rec_box = prepare_box(rec_sph, center, size, enclose_spheres, buffer, path)
    if rec_box is None:
        return None

    grid_prefix = prepare_grid(rec_mol2, rec_box, path)
    if grid_prefix is None:
        return None

    return rec_sph, grid_prefix


def prepare_mol2(receptor: str, path: str = '.') -> Optional[str]:
    """Prepare a receptor mol2 file from its input file

    Parameter
    ---------
    receptor : str
        the filename of a file containing the receptor

    Returns
    -------
    receptor_mol2 : Optional[str]
        the filename of the prepared mol2 file
    """
    p_rec = Path(receptor)
    p_rec_mol2 = Path(path) / f'{p_rec.stem}_withH.mol2'
    # (p_rec.with_name(f'{p_rec.stem}_withH.mol2'))
    args = ['chimera', '--nogui', '--script',
            f'{PREP_REC} {receptor} {p_rec_mol2}']
    ret = sp.run(args, stdout=sp.PIPE, stderr=sp.PIPE)

    try:
        ret.check_returncode()
    except sp.SubprocessError:
        print(f'ERROR: failed to convert receptor: "{receptor}"')
        if ret.stderr:
            print(f'Message: {ret.stderr.decode("utf-8")}', file=sys.stderr)
        return None

    return str(p_rec_mol2)


def prepare_pdb(receptor: str, path: str = '.') -> Optional[str]:
    """Prepare a receptor PDB file for usage in DOCK runs

    Parameter
    ---------
    receptor : str
        the filename of a file containing the receptor

    Returns
    -------
    receptor_mol2 : Optional[str]
        the filename of the prepared pdb file
    """
    p_rec = Path(receptor)
    rec_pdb = str(Path(path) / f'DOCK_{p_rec.stem}.pdb')
    # rec_pdb = str(p_rec.with_name(f'DOCK_{p_rec.stem}.pdb'))
    args = ['obabel', receptor, '-opdb', '-O', rec_pdb]
    ret = sp.run(args, stderr=sp.PIPE)

    try:
        ret.check_returncode()
    except sp.SubprocessError:
        print(f'ERROR: failed to convert receptor: "{receptor}"')
        if ret.stderr:
            print(f'Message: {ret.stderr.decode("utf-8")}', file=sys.stderr)
        return None

    return rec_pdb


def prepare_dms(rec_pdb: str, probe_radius: float = 1.4,
                path: str = '.') -> Optional[str]:
    # Generate a molecular surface (.dms) for the receptor via Chimera.
    # p_rec_pdb = Path(rec_pdb)
    # rec_dms = str(Path(rec_pdb).with_suffix('.dms'))
    p_rec_dms = Path(path) / f'{Path(rec_pdb).stem}.dms'
    argv = ['chimera', '--nogui', '--script',
            f'{WRITE_DMS} {rec_pdb} {probe_radius} {str(p_rec_dms)}']
    ret = sp.run(argv, stdout=sp.PIPE)

    try:
        ret.check_returncode()
    except sp.SubprocessError:
        print(f'ERROR: failed to generate surface from "{rec_pdb}"',
              file=sys.stderr)
        if ret.stderr:
            print(f'Message: {ret.stderr.decode("utf-8")}', file=sys.stderr)
        # NOTE: deliberately falls through and still returns the path
        # (`return None` left commented out) — confirm intent.
        # return None

    return str(p_rec_dms)


def prepare_sph(rec_dms: str, steric_clash_dist: float = 0.0,
                min_radius: float = 1.4, max_radius: float = 4.0,
                path: str = '.') -> Optional[str]:
    # Generate receptor spheres (.sph) from a surface file via sphgen_cpp.
    # sph_file = str(Path(rec_dms).with_suffix('.sph'))
    sph_file = str(Path(path) / f'{Path(rec_dms).stem}.sph')
    # NOTE(review): 'd', 'X' and 'm' look like they should be '-d', 'X' and
    # '-m' (cf. the dashed -i/-o/-s/-l/-x flags) — TODO confirm against the
    # sphgen_cpp usage text.
    argv = [SPHGEN, '-i', rec_dms, '-o', sph_file,
            '-s', 'R', 'd', 'X', '-l', str(steric_clash_dist),
            'm', str(min_radius), '-x', str(max_radius)]
    ret = sp.run(argv, stdout=sp.PIPE)

    try:
        ret.check_returncode()
    except sp.SubprocessError:
        print(f'ERROR: failed to generate spheres for "{rec_dms}"',
              file=sys.stderr)
        if ret.stderr:
            print(f'Message: {ret.stderr.decode("utf-8")}', file=sys.stderr)
        return None

    return sph_file


def select_spheres(sph_file: str, center: Tuple[float, float, float],
                   size: Tuple[float, float, float],
                   docked_ligand_file: Optional[str] = None,
                   use_largest: bool = False, buffer: float = 10.0,
                   path: str = '.') -> Optional[str]:
    # Select the subset of spheres to dock against.
    p_sph = Path(sph_file)
    selected_sph = str(Path(path) / f'{p_sph.stem}_selected_spheres.sph')
    # selected_sph = str(p_sph.parent / f'{p_sph.stem}_selected{p_sph.suffix}')

    if docked_ligand_file:
        # NOTE(review): `buffer` is a float; subprocess args must be str —
        # presumably should be str(buffer). TODO confirm.
        argv = [SPHERE_SELECTOR, sph_file, docked_ligand_file, buffer]
        sp.run(argv,
check=True) # sphere_selector always outputs this filename shutil.move('selected_spheres.sph', selected_sph) return selected_sph def inside_docking_box(line): """Are the coordinates contained in the line inside the docking box?""" try: tokens = line.split() xyz = list(map(float, tokens[1:4])) except ValueError: return False for i, coord in enumerate(xyz): if not center[i]-size[i] <= coord <= center[i]+size[i]: return False return True with open(sph_file, 'r') as fin, open(selected_sph, 'w') as fout: if use_largest: fout.write(f'DOCK spheres largest cluster\n') for line in takewhile(lambda line: 'cluster' not in line, fin): continue lines = list(takewhile(lambda line: 'cluster' not in line, fin)) else: fout.write(f'DOCK spheres within radii {size} of {center}\n') lines = [line for line in fin if inside_docking_box(line)] fout.write(f'cluster 1 number of spheres in cluster {len(lines)}\n') fout.writelines(lines) return selected_sph def prepare_box(sph_file: str, center: Tuple[float, float, float], size: Tuple[float, float, float], enclose_spheres: bool = True, buffer: float = 10.0, path: str = '.') -> Optional[str]: p_sph = Path(sph_file) shutil.copyfile(sph_file, 'tmp_spheres.sph') # p_box = p_sph.with_name(f'{p_sph.stem}_box.pdb') box_file = str(Path(path) / f'{p_sph.stem}_box.pdb') # box_file = str(p_box) if enclose_spheres: showbox_input = f'Y\n{buffer}\ntmp_spheres.sph\n1\n' else: x, y, z = center r_x, r_y, r_z = size showbox_input = f'N\nU\n{x} {y} {z}\n{r_x} {r_y} {r_z}\n' showbox_input += 'tmp_box.pdb\n' # with open('box.in', 'w') as fid: # if enclose_spheres: # fid.write('Y\n') # fid.write(f'{buffer}\n') # fid.write(f'{sph_file}\n') # fid.write(f'1\n') # else: # fid.write('N\n') # fid.write('U\n') # fid.write(f'[{center[0]} {center[1]} {center[2]}]\n') # fid.write(f'[{size[0]} {size[1]} {size[2]}]\n') # fid.write(f'{box_file}\n') ret = sp.run([SHOWBOX], input=showbox_input, universal_newlines=True, stdout=sp.PIPE) try: ret.check_returncode() except 
sp.SubprocessError: print(f'ERROR: failed to generate box corresponding to "{sph_file}"', file=sys.stderr) if ret.stderr: print(f'Message: {ret.stderr.decode("utf-8")}', file=sys.stderr) return None os.unlink('tmp_spheres.sph') shutil.move('tmp_box.pdb', box_file) return box_file def prepare_grid(rec_mol2: str, box_file: str, path: str = '.') -> Optional[str]: p_rec = Path(rec_mol2) p_grid_prefix = Path(path) / f'{p_rec.stem}_grid' shutil.copy(box_file, 'tmp_box.pdb') with open('grid.in', 'w') as fid: fid.write('compute_grids yes\n') fid.write('grid_spacing 0.4\n') fid.write('output_molecule no\n') fid.write('contact_score no\n') fid.write('energy_score yes\n') fid.write('energy_cutoff_distance 9999\n') fid.write('atom_model a\n') fid.write('attractive_exponent 6\n') fid.write('repulsive_exponent 12\n') fid.write('distance_dielectric yes\n') fid.write('dielectric_factor 4.0\n') fid.write('bump_filter yes\n') fid.write('bump_overlap 0.75\n') fid.write(f'receptor_file {rec_mol2}\n') fid.write('box_file tmp_box.pdb\n') fid.write(f'vdw_definition_file {VDW_DEFN_FILE}\n') fid.write(f'score_grid_prefix {p_grid_prefix}\n') ret = sp.run([GRID, '-i', 'grid.in', '-o', 'gridinfo.out'], stdout=sp.PIPE) try: ret.check_returncode() except sp.SubprocessError: print(f'ERROR: failed to generate grid from {rec_mol2}', file=sys.stderr) if ret.stderr: print(f'Message: {ret.stderr.decode("utf-8")}', file=sys.stderr) return None os.unlink('tmp_box.pdb') return str(p_grid_prefix)
import os
import logging
import csv

import pandas as pd
from collections import OrderedDict
from bs4 import BeautifulSoup

DUMMY_ANTECEDENT = None

#####################
# GLOBAL PARAMETERS
#####################
# Path "./data/*" assumes you are running from root folder, i.e. (python /src/baseline.py)
# Use path "../data/*" if you are running from src folder, i.e. (cd src) and then (python baseline.py)
COREF149_DIR = os.environ.get("COREF149_DIR", "../data/coref149")
SENTICOREF_DIR = os.environ.get("SENTICOREF149_DIR", "../data/senticoref1_0")
SENTICOREF_METADATA_DIR = "../data/senticoref_pos_stanza"
SSJ_PATH = os.environ.get("SSJ_PATH", "../data/ssj500k-sl.TEI/ssj500k-sl.body.reduced.xml")


def _read_tokens(corpus_soup):
    """ Obtain all tokens in current document.

    Arguments
    ---------
    corpus_soup: bs4.element.Tag
        Wrapped XML element containing the document (<tc:TextCorpus ...> tag).

    Returns
    -------
    dict[str, str]:
        Mapping of token IDs to raw tokens
    """
    id_to_tok = OrderedDict()
    # insertion order of the OrderedDict preserves document order
    for el in corpus_soup.findAll("tc:token"):
        token_id = el["id"]
        token = el.text.strip()
        id_to_tok[token_id] = token
    return id_to_tok


def _read_sentences(corpus_soup):
    """ Obtain all sentences in current document.

    Returns
    -------
    tuple:
        (list[list[str]], dict[str, list]):
        (1.) token IDs, organized into sentences
        (2.) token IDs to [index of sentence, index of token inside sentence]
    """
    sent_tok_ids = []
    tok_to_position = {}
    for idx_sent, el in enumerate(corpus_soup.findAll("tc:sentence")):
        token_ids = el["tokenids"].split(" ")
        for idx_tok, tok in enumerate(token_ids):
            tok_to_position[tok] = [idx_sent, idx_tok]
        sent_tok_ids.append(token_ids)
    return sent_tok_ids, tok_to_position


def _read_coreference(corpus_soup):
    """ Obtain all mentions and coreference clusters in current document.

    Returns
    -------
    tuple:
        (dict[str, list[str]], list[list[str]]):
        (1.) mentions
        (2.) mentions organized by coreference cluster
    """
    mentions = {}
    clusters = []
    for cluster_obj in corpus_soup.findAll("tc:entity"):
        curr_cluster = []
        for mention_obj in cluster_obj.findAll("tc:reference"):
            mention_id = mention_obj["id"]
            mention_tokens = mention_obj["tokenids"].split(" ")
            mentions[mention_id] = mention_tokens
            curr_cluster.append(mention_id)
        clusters.append(curr_cluster)
    return mentions, clusters


# Create a dictionary where each mention points to its antecedent (or the dummy antecedent)
def _coreference_chain(clusters_list):
    mapped_clusters = {}
    for curr_cluster in clusters_list:
        for i, curr_mention in enumerate(curr_cluster):
            # first mention of a cluster has no antecedent
            mapped_clusters[curr_mention] = DUMMY_ANTECEDENT if i == 0 else curr_cluster[i - 1]
    return mapped_clusters


class Token:
    """A single corpus token with its position and morphosyntactic data."""

    def __init__(self, token_id, raw_text, lemma, msd, sentence_index, position_in_sentence, position_in_document):
        self.token_id = token_id
        self.raw_text = raw_text
        self.lemma = lemma
        self.msd = msd

        self.sentence_index = sentence_index
        self.position_in_sentence = position_in_sentence
        self.position_in_document = position_in_document

        if msd is not None:
            self.gender = self._extract_gender(msd)
            self.number = self._extract_number(msd)
            self.category = msd[0]

    def __str__(self):
        return f"Token(\"{self.raw_text}\")"

    def _extract_number(self, msd_string):
        """Read the grammatical number out of a Slovene MSD tag (or None)."""
        number = None
        if msd_string[0] == "S" and len(msd_string) >= 4:  # noun/samostalnik
            number = msd_string[3]
        elif msd_string[0] == "G" and len(msd_string) >= 6:  # verb/glagol
            number = msd_string[5]
        # P = adjective (pridevnik), Z = pronoun (zaimek), K = numeral (števnik)
        elif msd_string[0] in {"P", "Z", "K"} and len(msd_string) >= 5:
            number = msd_string[4]
        return number

    def _extract_gender(self, msd_string):
        """Read the grammatical gender out of a Slovene MSD tag (or None)."""
        gender = None
        if msd_string[0] == "S" and len(msd_string) >= 3:  # noun/samostalnik
            gender = msd_string[2]
        elif msd_string[0] == "G" and len(msd_string) >= 7:  # verb/glagol
            gender = msd_string[6]
        # P = adjective (pridevnik), Z = pronoun (zaimek), K = numeral (števnik)
        elif msd_string[0] in {"P", "Z", "K"} and len(msd_string) >= 4:
            gender = msd_string[3]
        return gender


class Mention:
    """A coreference mention: an ID plus the Token objects it spans."""

    def __init__(self, mention_id, tokens):
        self.mention_id = mention_id
        self.tokens = tokens

    def __str__(self):
        # FIX: use single quotes inside the f-string expression -- reusing
        # double quotes inside a double-quoted f-string is a SyntaxError on
        # Python < 3.12 (only allowed since PEP 701)
        return f"Mention(\"{' '.join([tok.raw_text for tok in self.tokens])}\")"

    def raw_text(self):
        return " ".join([t.raw_text for t in self.tokens])

    def lemma_text(self):
        return " ".join([t.lemma for t in self.tokens if t.lemma is not None])


class Document:
    """A document: tokens, sentences, mentions and coreference clusters."""

    def __init__(self, doc_id, tokens, sentences, mentions, clusters, metadata=None):
        self.doc_id = doc_id  # type: str
        self.tokens = tokens  # type: dict
        self.sents = sentences  # type: list
        self.mentions = mentions  # type: dict
        self.clusters = clusters  # type: list
        self.mapped_clusters = _coreference_chain(self.clusters)
        self.metadata = metadata

    def raw_sentences(self):
        """ Returns list of sentences in document. """
        return [list(map(lambda t: self.tokens[t].raw_text, curr_sent))
                for curr_sent in self.sents]

    def __len__(self):
        return len(self.tokens)

    def __str__(self):
        return f"Document('{self.doc_id}', {len(self.tokens)} tokens)"


def sorted_mentions_dict(mentions):
    """Return mentions ordered by (sentence, start position, end position)."""
    # sorted() produces an array of (key, value) tuples, which we turn back into dictionary
    sorted_mentions = dict(sorted(mentions.items(),
                                  key=lambda tup: (tup[1].tokens[0].sentence_index,  # sentence
                                                   tup[1].tokens[0].position_in_sentence,  # start pos
                                                   tup[1].tokens[-1].position_in_sentence)))  # end pos
    return sorted_mentions


def read_senticoref_doc(file_path):
    """Read a single SentiCoref .tsv document into a Document object."""
    # Temporary cluster representation:
    # {cluster1 index: { mention1_idx: ['mention1', 'tokens'], mention2_idx: [...] }, cluster2_idx: {...} }
    _clusters = {}
    # Temporary buffer for current sentence
    _curr_sent = []

    sents = []
    id_to_tok = {}
    tok_to_position = {}
    idx_sent, idx_inside_sent = 0, 0
    mentions, clusters = {}, []
    doc_id = file_path.split(os.path.sep)[-1][:-4]  # = file name without ".tsv"

    # Note: `quoting=csv.QUOTE_NONE` is required as otherwise some documents can't be read
    # Note: `keep_default_na=False` is required as there's a typo in corpus ("NA"), interpreted as <missing>
    curr_annotations = pd.read_table(file_path, comment="#", sep="\t",
                                     index_col=False, quoting=csv.QUOTE_NONE,
                                     names=["token_index", "start_end", "token", "NamedEntity", "Polarity",
                                            "referenceRelation", "referenceType"],
                                     keep_default_na=False)
    curr_metadata = pd.read_table(os.path.join(SENTICOREF_METADATA_DIR, f"{doc_id}.tsv"),
                                  sep="\t", index_col=False, quoting=csv.QUOTE_NONE,
                                  header=0, keep_default_na=False)

    metadata = {"tokens": {}}

    for i, (tok_id, ref_info, token) in enumerate(
            curr_annotations[["token_index", "referenceRelation", "token"]].values):
        # Token is part of some mention
        if ref_info != "_":
            # Token can be part of multiple mentions
            ref_annotations = ref_info.split("|")
            for mention_info in ref_annotations:
                cluster_idx, mention_idx = list(map(int, mention_info[3:].split("-")))  # skip "*->"

                curr_mentions = _clusters.get(cluster_idx, {})
                curr_mention_tok_ids = curr_mentions.get(mention_idx, [])
                curr_mention_tok_ids.append(tok_id)
                curr_mentions[mention_idx] = curr_mention_tok_ids
                _clusters[cluster_idx] = curr_mentions

        _curr_sent.append(tok_id)
        tok_to_position[tok_id] = [idx_sent, idx_inside_sent]
        id_to_tok[tok_id] = token
        idx_inside_sent += 1

        text, pos_tag, lemma = curr_metadata.iloc[i].values
        metadata["tokens"][tok_id] = {"ana": pos_tag, "lemma": lemma, "text": text}

        # Segment sentences heuristically
        if token in {".", "!", "?"}:
            idx_sent += 1
            idx_inside_sent = 0
            sents.append(_curr_sent)
            _curr_sent = []

    # If the document doesn't end with proper punctuation
    if len(_curr_sent) > 0:
        sents.append(_curr_sent)

    # --- generate token objects
    final_tokens = OrderedDict()
    for index, (tok_id, tok_raw) in enumerate(id_to_tok.items()):
        final_tokens[tok_id] = Token(
            tok_id, tok_raw,
            metadata["tokens"][tok_id]["lemma"] if "lemma" in metadata["tokens"][tok_id] else None,
            metadata["tokens"][tok_id]["ana"].split(":")[1],
            tok_to_position[tok_id][0],
            tok_to_position[tok_id][1],
            index
        )
    # ---

    mention_counter = 0
    for idx_cluster, curr_mentions in _clusters.items():
        curr_cluster = []
        for idx_mention, mention_tok_ids in curr_mentions.items():
            # assign coref149-style IDs to mentions
            mention_id = f"rc_{mention_counter}"
            mention_tokens = list(map(lambda tok_id: final_tokens[tok_id], mention_tok_ids))

            mentions[mention_id] = Mention(mention_id, mention_tokens)
            curr_cluster.append(mention_id)
            mention_counter += 1
        clusters.append(curr_cluster)

    return Document(doc_id, final_tokens, sents,
                    sorted_mentions_dict(mentions), clusters, metadata=metadata)


def read_coref149_doc(file_path, ssj_doc):
    """Read a single coref149 .tcf document, remapping token IDs to ssj500k."""
    with open(file_path, encoding="utf8") as f:
        content = f.readlines()
        content = "".join(content)

    soup = BeautifulSoup(content, "lxml").find("tc:textcorpus")
    doc_id = file_path.split(os.path.sep)[-1][:-4]  # = file name without ".tcf"

    # Read data as defined in coref149
    tokens = _read_tokens(soup)
    sents, tok_to_position = _read_sentences(soup)
    mentions, clusters = _read_coreference(soup)

    # Tokens have different IDs in ssj500k, so remap coref149 style to ssj500k style
    idx_sent_coref, idx_token_coref = 0, 0
    _coref_to_ssj = {}  # mapping from coref ids to ssj ids
    for curr_sent in ssj_doc.findAll("s"):
        for curr_token in curr_sent.findAll(["w", "pc"]):
            coref_token_id = sents[idx_sent_coref][idx_token_coref]
            ssj_token_id = curr_token["xml:id"]

            # Warn in case tokenization is different between datasets (we are slightly screwed in that case)
            if curr_token.text.strip() != tokens[coref_token_id]:
                logging.warning(f"MISMATCH! '{curr_token.text.strip()}' (ssj500k ID: {ssj_token_id}) vs "
                                f"'{tokens[coref_token_id]}' (coref149 ID: {coref_token_id})")

            _coref_to_ssj[coref_token_id] = ssj_token_id
            idx_token_coref += 1
            if idx_token_coref == len(sents[idx_sent_coref]):
                idx_sent_coref += 1
                idx_token_coref = 0

    # sentences are composed of ssj token IDs
    fixed_sents = [[_coref_to_ssj[curr_id] for curr_id in curr_sent] for curr_sent in sents]

    # Write all metadata for tokens
    # Note: currently not writing SRL/dependency metadata
    metadata = {"tokens": {}}
    for token in ssj_doc.findAll(["w", "c", "pc"]):
        token_id = token.get("xml:id", None)
        if token_id:
            metadata["tokens"][token_id] = token.attrs
            metadata["tokens"][token_id]["text"] = token.text

    final_tokens = OrderedDict()
    for index, (coref_token_id, raw_text) in enumerate(tokens.items()):
        ssj_token_id = _coref_to_ssj[coref_token_id]  # mapping of coref token ID to ssj token ID
        final_tokens[ssj_token_id] = Token(
            ssj_token_id, raw_text,
            metadata["tokens"][ssj_token_id]["lemma"] if "lemma" in metadata["tokens"][ssj_token_id] else None,
            metadata["tokens"][ssj_token_id]["ana"].split(":")[1],
            tok_to_position[coref_token_id][0],  # Note: tok_to_pos uses coref IDs, not ssj IDs
            tok_to_position[coref_token_id][1],
            index)

    final_mentions = {}
    for mention_id, mention_tokens in mentions.items():
        token_objs = [final_tokens[_coref_to_ssj[tok_id]] for tok_id in mention_tokens]
        final_mentions[mention_id] = Mention(mention_id, token_objs)

    # TODO: is metadata required here? metadata for tokens has been moved to token object
    return Document(doc_id, final_tokens, fixed_sents,
                    sorted_mentions_dict(final_mentions), clusters, metadata=metadata)


def read_corpus(name):
    """Read the whole "coref149" or "senticoref" corpus into Document objects."""
    SUPPORTED_DATASETS = {"coref149", "senticoref"}
    if name not in SUPPORTED_DATASETS:
        raise ValueError(f"Unsupported dataset (must be one of {SUPPORTED_DATASETS})")

    if name == "coref149":
        with open(SSJ_PATH, encoding="utf8") as ssj:
            content = ssj.readlines()
            content = "".join(content)

        ssj_soup = BeautifulSoup(content, "lxml")
        doc_to_soup = {}
        for curr_soup in ssj_soup.findAll("p"):
            doc_to_soup[curr_soup["xml:id"]] = curr_soup

        doc_ids = [f[:-4] for f in os.listdir(COREF149_DIR)
                   if os.path.isfile(os.path.join(COREF149_DIR, f)) and f.endswith(".tcf")]
        return [read_coref149_doc(os.path.join(COREF149_DIR, f"{curr_id}.tcf"), doc_to_soup[curr_id])
                for curr_id in doc_ids]
    else:
        doc_ids = [f[:-4] for f in os.listdir(SENTICOREF_DIR)
                   if os.path.isfile(os.path.join(SENTICOREF_DIR, f)) and f.endswith(".tsv")]
        return [read_senticoref_doc(os.path.join(SENTICOREF_DIR, f"{curr_id}.tsv"))
                for curr_id in doc_ids]


if __name__ == "__main__":
    DATASET_NAME = "senticoref"
    documents = read_corpus(DATASET_NAME)
    print(f"Read {len(documents)} documents")

    # http://nl.ijs.si/ME/Vault/V5/msd/html/msd-sl.html#msd.categories-sl
    if DATASET_NAME == "senticoref":
        # English tags - because tags are predicted with Stanza
        char_tag_to_pos = dict(zip(["N", "V", "A", "R", "P", "M", "S", "C", "Q", "I", "Y", "X", "Z"],
                                   ["samostalnik", "glagol", "pridevnik", "prislov", "zaimek", "števnik",
                                    "predlog", "veznik", "členek", "medmet", "okrajšava", "neuvrščeno", "ločilo"]))
    elif DATASET_NAME == "coref149":
        char_tag_to_pos = dict(zip(["S", "G", "P", "R", "Z", "K", "D", "V", "L", "M", "O", "N", "U"],
                                   ["samostalnik", "glagol", "pridevnik", "prislov", "zaimek", "števnik",
                                    "predlog", "veznik", "členek", "medmet", "okrajšava", "neuvrščeno", "ločilo"]))
    pos_to_idx = {c: i for i, c in enumerate(char_tag_to_pos.values())}
    pos_count = [0 for _ in range(len(pos_to_idx))]

    # frequency of part-of-speech of the first token of each mention
    for doc in documents:
        for mention_id, mention in doc.mentions.items():
            first_token = mention.tokens[0]  # type: Token
            curr_tag = char_tag_to_pos[first_token.msd[0]]
            pos_count[pos_to_idx[curr_tag]] += 1

    print("besedna_vrsta,frekvenca")
    for curr_pos in pos_to_idx:
        print(f"{curr_pos},{pos_count[pos_to_idx[curr_pos]]}")

    entity_size_count = {}  # entity/cluster size -> number of such entities
    mentions_by_documents = {}  # number of mentions -> number of documents with this amount of mentions
    for doc in documents:
        num_mentions = 0
        for curr_cluster in doc.clusters:
            cluster_size = len(curr_cluster)
            num_mentions += cluster_size
            entity_size_count[cluster_size] = entity_size_count.get(cluster_size, 0) + 1
        mentions_by_documents[num_mentions] = mentions_by_documents.get(num_mentions, 0) + 1

    print("\nvelikost_entitete,frekvenca")
    for curr_size, num_mentions in sorted(entity_size_count.items(), key=lambda tup: tup[0]):
        print(f"{curr_size},{num_mentions}")

    print("\nštevilo_omenitev_v_dokumentu,frekvenca")
    for curr_num_mentions, num_docs in sorted(mentions_by_documents.items(), key=lambda tup: tup[0]):
        print(f"{curr_num_mentions},{num_docs}")

    dist_between_mentions = {}  # dist between consecutive mentions (in num. of mentions) -> frequency of this distance
    for doc in documents:
        sorted_mentions = sorted([(mention_id,
                                   curr_mention.tokens[0].position_in_document,
                                   curr_mention.tokens[-1].position_in_document)
                                  for mention_id, curr_mention in doc.mentions.items()],
                                 key=lambda triple: (triple[1], triple[2]))
        mention_id_to_rank = {mention_id: rank for rank, (mention_id, _, _) in enumerate(sorted_mentions)}

        for curr_cluster in doc.clusters:
            sorted_cluster = sorted(curr_cluster,
                                    key=lambda m_id: (doc.mentions[m_id].tokens[0].position_in_document,
                                                      doc.mentions[m_id].tokens[-1].position_in_document))
            for m1_id, m2_id in zip(sorted_cluster, sorted_cluster[1:]):
                # Distance 0 = mentions right next to eachother when ordered by position
                rank_diff = mention_id_to_rank[m2_id] - mention_id_to_rank[m1_id] - 1
                dist_between_mentions[rank_diff] = dist_between_mentions.get(rank_diff, 0) + 1

    print("\nrazdalja_med_zaporednima_omenitvama_iste_entitete,frekvenca")
    for curr_dist, num_mentions in sorted(dist_between_mentions.items(), key=lambda tup: tup[0]):
        print(f"{curr_dist},{num_mentions}")
# NOTE(review): everything below duplicates the module defined earlier in
# this file (same imports, globals, classes and readers) -- consider
# keeping only one copy
import os
import logging
import csv

import pandas as pd
from collections import OrderedDict
from bs4 import BeautifulSoup

DUMMY_ANTECEDENT = None

#####################
# GLOBAL PARAMETERS
#####################
# Path "./data/*" assumes you are running from root folder, i.e. (python /src/baseline.py)
# Use path "../data/*" if you are running from src folder, i.e. (cd src) and then (python baseline.py)
COREF149_DIR = os.environ.get("COREF149_DIR", "../data/coref149")
SENTICOREF_DIR = os.environ.get("SENTICOREF149_DIR", "../data/senticoref1_0")
SENTICOREF_METADATA_DIR = "../data/senticoref_pos_stanza"
SSJ_PATH = os.environ.get("SSJ_PATH", "../data/ssj500k-sl.TEI/ssj500k-sl.body.reduced.xml")


def _read_tokens(corpus_soup):
    """ Obtain all tokens in current document.

    Arguments
    ---------
    corpus_soup: bs4.element.Tag
        Wrapped XML element containing the document (<tc:TextCorpus ...> tag).

    Returns
    -------
    dict[str, str]:
        Mapping of token IDs to raw tokens
    """
    id_to_tok = OrderedDict()
    for i, el in enumerate(corpus_soup.findAll("tc:token")):
        token_id = el["id"]
        token = el.text.strip()
        id_to_tok[token_id] = token
    return id_to_tok


def _read_sentences(corpus_soup):
    """ Obtain all sentences in current document.

    Returns
    -------
    tuple:
        (list[list[str]], dict[str, list]):
        (1.) token IDs, organized into sentences
        (2.) token IDs to [index of sentence, index of token inside sentence]
    """
    sent_tok_ids = []
    tok_to_position = {}
    for idx_sent, el in enumerate(corpus_soup.findAll("tc:sentence")):
        token_ids = el["tokenids"].split(" ")
        for idx_tok, tok in enumerate(token_ids):
            tok_to_position[tok] = [idx_sent, idx_tok]
        sent_tok_ids.append(token_ids)
    return sent_tok_ids, tok_to_position


def _read_coreference(corpus_soup):
    """ Obtain all mentions and coreference clusters in current document.

    Returns
    -------
    tuple:
        (dict[str, list[str]], list[list[str]]):
        (1.) mentions
        (2.) mentions organized by coreference cluster
    """
    mentions = {}
    clusters = []
    for cluster_obj in corpus_soup.findAll("tc:entity"):
        curr_cluster = []
        for mention_obj in cluster_obj.findAll("tc:reference"):
            mention_id = mention_obj["id"]
            mention_tokens = mention_obj["tokenids"].split(" ")
            mentions[mention_id] = mention_tokens
            curr_cluster.append(mention_id)
        clusters.append(curr_cluster)
    return mentions, clusters


# Create a dictionary where each mention points to its antecedent (or the dummy antecedent)
def _coreference_chain(clusters_list):
    mapped_clusters = {}
    for curr_cluster in clusters_list:
        for i, curr_mention in enumerate(curr_cluster):
            mapped_clusters[curr_mention] = DUMMY_ANTECEDENT if i == 0 else curr_cluster[i - 1]
    return mapped_clusters


class Token:
    def __init__(self, token_id, raw_text, lemma, msd, sentence_index, position_in_sentence, position_in_document):
        self.token_id = token_id
        self.raw_text = raw_text
        self.lemma = lemma
        self.msd = msd

        self.sentence_index = sentence_index
        self.position_in_sentence = position_in_sentence
        self.position_in_document = position_in_document

        if msd is not None:
            self.gender = self._extract_gender(msd)
            self.number = self._extract_number(msd)
            self.category = msd[0]

    def __str__(self):
        return f"Token(\"{self.raw_text}\")"

    def _extract_number(self, msd_string):
        # grammatical number lives at a category-dependent index of the MSD tag
        number = None
        if msd_string[0] == "S" and len(msd_string) >= 4:  # noun/samostalnik
            number = msd_string[3]
        elif msd_string[0] == "G" and len(msd_string) >= 6:  # verb/glagol
            number = msd_string[5]
        # P = adjective (pridevnik), Z = pronoun (zaimek), K = numeral (števnik)
        elif msd_string[0] in {"P", "Z", "K"} and len(msd_string) >= 5:
            number = msd_string[4]
        return number

    def _extract_gender(self, msd_string):
        # grammatical gender lives at a category-dependent index of the MSD tag
        gender = None
        if msd_string[0] == "S" and len(msd_string) >= 3:  # noun/samostalnik
            gender = msd_string[2]
        elif msd_string[0] == "G" and len(msd_string) >= 7:  # verb/glagol
            gender = msd_string[6]
        # P = adjective (pridevnik), Z = pronoun (zaimek), K = numeral (števnik)
        elif msd_string[0] in {"P", "Z", "K"} and len(msd_string) >= 4:
            gender = msd_string[3]
        return gender


class Mention:
    def __init__(self, mention_id, tokens):
        self.mention_id = mention_id
        self.tokens = tokens

    def __str__(self):
        return f"Mention(\"{' '.join([tok.raw_text for tok in self.tokens])}\")"

    def raw_text(self):
        return " ".join([t.raw_text for t in self.tokens])

    def lemma_text(self):
        return " ".join([t.lemma for t in self.tokens if t.lemma is not None])


class Document:
    def __init__(self, doc_id, tokens, sentences, mentions, clusters, metadata=None):
        self.doc_id = doc_id  # type: str
        self.tokens = tokens  # type: dict
        self.sents = sentences  # type: list
        self.mentions = mentions  # type: dict
        self.clusters = clusters  # type: list
        self.mapped_clusters = _coreference_chain(self.clusters)
        self.metadata = metadata

    def raw_sentences(self):
        """ Returns list of sentences in document. """
        return [list(map(lambda t: self.tokens[t].raw_text, curr_sent))
                for curr_sent in self.sents]

    def __len__(self):
        return len(self.tokens)

    def __str__(self):
        return f"Document('{self.doc_id}', {len(self.tokens)} tokens)"


def sorted_mentions_dict(mentions):
    # sorted() produces an array of (key, value) tuples, which we turn back into dictionary
    sorted_mentions = dict(sorted(mentions.items(),
                                  key=lambda tup: (tup[1].tokens[0].sentence_index,  # sentence
                                                   tup[1].tokens[0].position_in_sentence,  # start pos
                                                   tup[1].tokens[-1].position_in_sentence)))  # end pos
    return sorted_mentions


def read_senticoref_doc(file_path):
    # Temporary cluster representation:
    # {cluster1 index: { mention1_idx: ['mention1', 'tokens'], mention2_idx: [...] }, cluster2_idx: {...} }
    _clusters = {}
    # Temporary buffer for current sentence
    _curr_sent = []

    sents = []
    id_to_tok = {}
    tok_to_position = {}
    idx_sent, idx_inside_sent = 0, 0
    mentions, clusters = {}, []
    doc_id = file_path.split(os.path.sep)[-1][:-4]  # = file name without ".tsv"

    # Note: `quoting=csv.QUOTE_NONE` is required as otherwise some documents can't be read
    # Note: `keep_default_na=False` is required as there's a typo in corpus ("NA"), interpreted as <missing>
    curr_annotations = pd.read_table(file_path, comment="#", sep="\t",
                                     index_col=False, quoting=csv.QUOTE_NONE,
                                     names=["token_index", "start_end", "token", "NamedEntity", "Polarity",
                                            "referenceRelation", "referenceType"],
                                     keep_default_na=False)
    curr_metadata = pd.read_table(os.path.join(SENTICOREF_METADATA_DIR, f"{doc_id}.tsv"),
                                  sep="\t", index_col=False, quoting=csv.QUOTE_NONE,
                                  header=0, keep_default_na=False)

    metadata = {"tokens": {}}

    for i, (tok_id, ref_info, token) in enumerate(
            curr_annotations[["token_index", "referenceRelation", "token"]].values):
        # Token is part of some mention
        if ref_info != "_":
            # Token can be part of multiple mentions
            ref_annotations = ref_info.split("|")
            for mention_info in ref_annotations:
                cluster_idx, mention_idx = list(map(int, mention_info[3:].split("-")))  # skip "*->"

                curr_mentions = _clusters.get(cluster_idx, {})
                curr_mention_tok_ids = curr_mentions.get(mention_idx, [])
                curr_mention_tok_ids.append(tok_id)
                curr_mentions[mention_idx] = curr_mention_tok_ids
                _clusters[cluster_idx] = curr_mentions

        _curr_sent.append(tok_id)
        tok_to_position[tok_id] = [idx_sent, idx_inside_sent]
        id_to_tok[tok_id] = token
        idx_inside_sent += 1

        text, pos_tag, lemma = curr_metadata.iloc[i].values
        metadata["tokens"][tok_id] = {"ana": pos_tag, "lemma": lemma, "text": text}

        # Segment sentences heuristically
        if token in {".", "!", "?"}:
            idx_sent += 1
            idx_inside_sent = 0
            sents.append(_curr_sent)
            _curr_sent = []

    # If the document doesn't end with proper punctuation
    if len(_curr_sent) > 0:
        sents.append(_curr_sent)

    # --- generate token objects
    final_tokens = OrderedDict()
    for index, (tok_id, tok_raw) in enumerate(id_to_tok.items()):
        final_tokens[tok_id] = Token(
            tok_id, tok_raw,
            metadata["tokens"][tok_id]["lemma"] if "lemma" in metadata["tokens"][tok_id] else None,
            metadata["tokens"][tok_id]["ana"].split(":")[1],
            tok_to_position[tok_id][0],
            tok_to_position[tok_id][1],
            index
        )
    # ---

    mention_counter = 0
    for idx_cluster, curr_mentions in _clusters.items():
        curr_cluster = []
        for idx_mention, mention_tok_ids in curr_mentions.items():
            # assign coref149-style IDs to mentions
            mention_id = f"rc_{mention_counter}"
            mention_tokens = list(map(lambda tok_id: final_tokens[tok_id], mention_tok_ids))

            mentions[mention_id] = Mention(mention_id, mention_tokens)
            curr_cluster.append(mention_id)
            mention_counter += 1
        clusters.append(curr_cluster)

    return Document(doc_id, final_tokens, sents,
                    sorted_mentions_dict(mentions), clusters, metadata=metadata)


def read_coref149_doc(file_path, ssj_doc):
    with open(file_path, encoding="utf8") as f:
        content = f.readlines()
        content = "".join(content)

    soup = BeautifulSoup(content, "lxml").find("tc:textcorpus")
    doc_id = file_path.split(os.path.sep)[-1][:-4]  # = file name without ".tcf"

    # Read data as defined in coref149
    tokens = _read_tokens(soup)
    sents, tok_to_position = _read_sentences(soup)
    mentions, clusters = _read_coreference(soup)

    # Tokens have different IDs in ssj500k, so remap coref149 style to ssj500k style
    idx_sent_coref, idx_token_coref = 0, 0
    _coref_to_ssj = {}  # mapping from coref ids to ssj ids
    for curr_sent in ssj_doc.findAll("s"):
        for curr_token in curr_sent.findAll(["w", "pc"]):
            coref_token_id = sents[idx_sent_coref][idx_token_coref]
            ssj_token_id = curr_token["xml:id"]

            # Warn in case tokenization is different between datasets (we are slightly screwed in that case)
            if curr_token.text.strip() != tokens[coref_token_id]:
                logging.warning(f"MISMATCH! '{curr_token.text.strip()}' (ssj500k ID: {ssj_token_id}) vs "
                                f"'{tokens[coref_token_id]}' (coref149 ID: {coref_token_id})")

            _coref_to_ssj[coref_token_id] = ssj_token_id
            idx_token_coref += 1
            if idx_token_coref == len(sents[idx_sent_coref]):
                idx_sent_coref += 1
                idx_token_coref = 0

    # sentences are composed of ssj token IDs
    fixed_sents = [[_coref_to_ssj[curr_id] for curr_id in curr_sent] for curr_sent in sents]

    # Write all metadata for tokens
    # Note: currently not writing SRL/dependency metadata
    metadata = {"tokens": {}}
    for token in ssj_doc.findAll(["w", "c", "pc"]):
        token_id = token.get("xml:id", None)
        if token_id:
            metadata["tokens"][token_id] = token.attrs
            metadata["tokens"][token_id]["text"] = token.text

    final_tokens = OrderedDict()
    for index, (coref_token_id, raw_text) in enumerate(tokens.items()):
        ssj_token_id = _coref_to_ssj[coref_token_id]  # mapping of coref token ID to ssj token ID
        final_tokens[ssj_token_id] = Token(
            ssj_token_id, raw_text,
            metadata["tokens"][ssj_token_id]["lemma"] if "lemma" in metadata["tokens"][ssj_token_id] else None,
            metadata["tokens"][ssj_token_id]["ana"].split(":")[1],
            tok_to_position[coref_token_id][0],  # Note: tok_to_pos uses coref IDs, not ssj IDs
            tok_to_position[coref_token_id][1],
            index)

    final_mentions = {}
    for mention_id, mention_tokens in mentions.items():
        token_objs = [final_tokens[_coref_to_ssj[tok_id]] for tok_id in mention_tokens]
        final_mentions[mention_id] = Mention(mention_id, token_objs)

    # TODO: is metadata required here?
metadata for tokens has been moved to token object return Document(doc_id, final_tokens, fixed_sents, sorted_mentions_dict(final_mentions), clusters, metadata=metadata) def read_corpus(name): SUPPORTED_DATASETS = {"coref149", "senticoref"} if name not in SUPPORTED_DATASETS: raise ValueError(f"Unsupported dataset (must be one of {SUPPORTED_DATASETS})") if name == "coref149": with open(SSJ_PATH, encoding="utf8") as ssj: content = ssj.readlines() content = "".join(content) ssj_soup = BeautifulSoup(content, "lxml") doc_to_soup = {} for curr_soup in ssj_soup.findAll("p"): doc_to_soup[curr_soup["xml:id"]] = curr_soup doc_ids = [f[:-4] for f in os.listdir(COREF149_DIR) if os.path.isfile(os.path.join(COREF149_DIR, f)) and f.endswith(".tcf")] return [read_coref149_doc(os.path.join(COREF149_DIR, f"{curr_id}.tcf"), doc_to_soup[curr_id]) for curr_id in doc_ids] else: doc_ids = [f[:-4] for f in os.listdir(SENTICOREF_DIR) if os.path.isfile(os.path.join(SENTICOREF_DIR, f)) and f.endswith(".tsv")] return [read_senticoref_doc(os.path.join(SENTICOREF_DIR, f"{curr_id}.tsv")) for curr_id in doc_ids] if __name__ == "__main__": DATASET_NAME = "senticoref" documents = read_corpus(DATASET_NAME) print(f"Read {len(documents)} documents") # http://nl.ijs.si/ME/Vault/V5/msd/html/msd-sl.html#msd.categories-sl if DATASET_NAME == "senticoref": # English tags - because tags are predicted with Stanza char_tag_to_pos = dict(zip(["N", "V", "A", "R", "P", "M", "S", "C", "Q", "I", "Y", "X", "Z"], ["samostalnik", "glagol", "pridevnik", "prislov", "zaimek", "števnik", "predlog", "veznik", "členek", "medmet", "okrajšava", "neuvrščeno", "ločilo"])) elif DATASET_NAME == "coref149": char_tag_to_pos = dict(zip(["S", "G", "P", "R", "Z", "K", "D", "V", "L", "M", "O", "N", "U"], ["samostalnik", "glagol", "pridevnik", "prislov", "zaimek", "števnik", "predlog", "veznik", "členek", "medmet", "okrajšava", "neuvrščeno", "ločilo"])) pos_to_idx = {c: i for i, c in enumerate(char_tag_to_pos.values())} pos_count = [0 
for _ in range(len(pos_to_idx))]  # one counter per POS category (continuation of `pos_count = [0`)

    # Count, over all documents, the POS category of each mention's FIRST token.
    for doc in documents:
        for mention_id, mention in doc.mentions.items():
            first_token = mention.tokens[0]  # type: Token
            # msd[0] is the single-character POS tag; mapped to a Slovene category name
            curr_tag = char_tag_to_pos[first_token.msd[0]]
            pos_count[pos_to_idx[curr_tag]] += 1

    # CSV output: frequency of part-of-speech of mention-initial tokens
    print("besedna_vrsta,frekvenca")
    for curr_pos in pos_to_idx:
        print(f"{curr_pos},{pos_count[pos_to_idx[curr_pos]]}")

    entity_size_count = {}  # entity/cluster size -> number of such entities
    mentions_by_documents = {}  # number of mentions -> number of documents with this amount of mentions
    for doc in documents:
        num_mentions = 0
        for curr_cluster in doc.clusters:
            cluster_size = len(curr_cluster)
            num_mentions += cluster_size
            entity_size_count[cluster_size] = entity_size_count.get(cluster_size, 0) + 1
        mentions_by_documents[num_mentions] = mentions_by_documents.get(num_mentions, 0) + 1

    # CSV output: distribution of entity (cluster) sizes
    print("\nvelikost_entitete,frekvenca")
    for curr_size, num_mentions in sorted(entity_size_count.items(), key=lambda tup: tup[0]):
        print(f"{curr_size},{num_mentions}")

    # CSV output: distribution of mention counts per document
    print("\nštevilo_omenitev_v_dokumentu,frekvenca")
    for curr_num_mentions, num_docs in sorted(mentions_by_documents.items(), key=lambda tup: tup[0]):
        print(f"{curr_num_mentions},{num_docs}")

    dist_between_mentions = {}  # dist between consecutive mentions (in num. of mentions) -> frequency of this distance
    for doc in documents:
        # Rank every mention in the document by (start position, end position).
        sorted_mentions = sorted([(mention_id,
                                   curr_mention.tokens[0].position_in_document,
                                   curr_mention.tokens[-1].position_in_document)
                                  for mention_id, curr_mention in doc.mentions.items()],
                                 key=lambda triple: (triple[1], triple[2]))
        mention_id_to_rank = {mention_id: rank for rank, (mention_id, _, _) in enumerate(sorted_mentions)}
        for curr_cluster in doc.clusters:
            # Order the cluster's mentions by document position as well.
            sorted_cluster = sorted(curr_cluster,
                                    key=lambda m_id: (doc.mentions[m_id].tokens[0].position_in_document,
                                                      doc.mentions[m_id].tokens[-1].position_in_document))
            for m1_id, m2_id in zip(sorted_cluster, sorted_cluster[1:]):
                # Distance 0 = mentions right next to each other when ordered by position
                rank_diff = mention_id_to_rank[m2_id] - mention_id_to_rank[m1_id] - 1
                dist_between_mentions[rank_diff] = dist_between_mentions.get(rank_diff, 0) + 1

    # CSV output: rank distance between consecutive mentions of the same entity
    print("\nrazdalja_med_zaporednima_omenitvama_iste_entitete,frekvenca")
    for curr_dist, num_mentions in sorted(dist_between_mentions.items(), key=lambda tup: tup[0]):
        print(f"{curr_dist},{num_mentions}")
from skt.vault_utils import get_secrets


def get_hive_conn():
    """Open a PyHive connection to HiveServer2 using credentials from Vault ("ye/hiveserver2")."""
    from pyhive import hive

    hiveserver2 = get_secrets(path="ye/hiveserver2")
    host = hiveserver2["ip"]
    port = hiveserver2["port"]
    user = hiveserver2["user"]
    conn = hive.connect(host, port=port, username=user)
    return conn


def get_hdfs_conn():
    """Open a pyarrow HDFS connection as the `airflow` user."""
    import os
    import pyarrow

    # libhdfs shared object shipped with the HDP 3.0.1 distribution
    os.environ["ARROW_LIBHDFS_DIR"] = "/usr/hdp/3.0.1.0-187/usr/lib"
    conn = pyarrow.hdfs.connect(user="airflow")
    return conn


def get_sqlalchemy_engine():
    """Build a SQLAlchemy engine for the `tmp` schema on HiveServer2 (credentials from Vault)."""
    from sqlalchemy import create_engine

    hiveserver2 = get_secrets(path="ye/hiveserver2")
    host = hiveserver2["ip"]
    port = hiveserver2["port"]
    user = hiveserver2["user"]
    return create_engine(f"hive://{user}@{host}:{port}/tmp")


def get_pkl_from_hdfs(pkl_path):
    """Fetch the file at `pkl_path` from HDFS and return the unpickled object."""
    import pickle

    conn = get_hdfs_conn()
    byte_object = conn.cat(f"{pkl_path}")
    pkl_object = pickle.loads(byte_object)
    return pkl_object


def get_spark(scale=0, queue=None):
    """Create a Hive-enabled SparkSession on YARN.

    Parameters
    ----------
    scale : int
        1-4 selects a fixed-size cluster whose driver/executor resources grow
        linearly with `scale`; any other value uses dynamic allocation.
    queue : str, optional
        YARN queue; defaults to "dmig_eda" under JupyterHub, else "airflow_job".
    """
    import os
    import uuid
    import tempfile
    from pyspark.sql import SparkSession
    from skt.vault_utils import get_secrets

    tmp_uuid = str(uuid.uuid4())
    # BUGFIX: the nested lookup must use different quotes than the enclosing
    # f-string -- reusing double quotes here is a SyntaxError on Python < 3.12.
    app_name = f"skt-{os.environ.get('USER', 'default')}-{tmp_uuid}"
    if not queue:
        if "JUPYTERHUB_USER" in os.environ:
            queue = "dmig_eda"
        else:
            queue = "airflow_job"
    os.environ["ARROW_PRE_0_15_IPC_FORMAT"] = "1"
    # Write the GCP service-account key to a temp file for the BigQuery connector.
    key = get_secrets("gcp/sktaic-datahub/dataflow")["config"]
    key_file_name = tempfile.mkstemp()[1]
    with open(key_file_name, "wb") as key_file:
        key_file.write(key.encode())
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = key_file.name
    if scale in [1, 2, 3, 4]:
        spark = (
            SparkSession.builder.config("spark.app.name", app_name)
            .config("spark.driver.memory", f"{scale*8}g")
            .config("spark.executor.memory", f"{scale*3}g")
            .config("spark.executor.instances", f"{scale*8}")
            .config("spark.driver.maxResultSize", f"{scale*4}g")
            .config("spark.rpc.message.maxSize", "1024")
            .config("spark.yarn.queue", queue)
            .config("spark.ui.enabled", "false")
            .config("spark.port.maxRetries", "128")
            .config("spark.executorEnv.ARROW_PRE_0_15_IPC_FORMAT", "1")
            .config("spark.yarn.appMasterEnv.ARROW_PRE_0_15_IPC_FORMAT", "1")
            .config("spark.jars", "gs://external_libs/spark/jars/spark-bigquery-with-dependencies_2.11-0.16.1.jar",)
            .enableHiveSupport()
            .getOrCreate()
        )
    else:
        spark = (
            SparkSession.builder.config("spark.app.name", app_name)
            .config("spark.driver.memory", "6g")
            .config("spark.executor.memory", "8g")
            .config("spark.shuffle.service.enabled", "true")
            .config("spark.dynamicAllocation.enabled", "true")
            .config("spark.dynamicAllocation.maxExecutors", "200")
            .config("spark.driver.maxResultSize", "6g")
            .config("spark.rpc.message.maxSize", "1024")
            .config("spark.yarn.queue", queue)
            .config("spark.ui.enabled", "false")
            .config("spark.port.maxRetries", "128")
            .config("spark.executorEnv.ARROW_PRE_0_15_IPC_FORMAT", "1")
            .config("spark.yarn.appMasterEnv.ARROW_PRE_0_15_IPC_FORMAT", "1")
            .config("spark.jars", "gs://external_libs/spark/jars/spark-bigquery-with-dependencies_2.11-0.16.1.jar",)
            .enableHiveSupport()
            .getOrCreate()
        )
    spark.conf.set("spark.sql.execution.arrow.enabled", "true")
    return spark


def hive_execute(query):
    """Run `query` on Hive, discarding any result."""
    conn = get_hive_conn()
    c = conn.cursor()
    c.execute(query)
    c.close()
    conn.close()


def hive_get_result(query):
    """Run `query` on Hive and return all rows (list of tuples)."""
    conn = get_hive_conn()
    c = conn.cursor()
    c.execute(query)
    result = c.fetchall()
    c.close()
    conn.close()
    return result


def hive_to_pandas(query, scale=0):
    """Run `query` on Hive and return the result as a pandas DataFrame.

    scale == 1 streams the result directly through pandas.read_sql (suited for
    small results); otherwise the query is materialized as a scratch parquet
    table in the `dumbo` schema and read back via HDFS, which is much faster
    for large results.
    """
    if scale == 1:
        import pandas

        conn = get_hive_conn()
        df = pandas.read_sql(query, conn)
        df.info()
        conn.close()
        return df

    import uuid

    tmp_id = str(uuid.uuid4()).replace("-", "_")
    ctas = f"CREATE TABLE dumbo.{tmp_id} stored as parquet as {query}"
    conn = get_hive_conn()
    c = conn.cursor()
    try:
        c.execute("set parquet.column.index.access=false")
        c.execute(ctas)
        hdfs = get_hdfs_conn()
        table_path = hdfs.ls(f"/warehouse/tablespace/managed/hive/dumbo.db/{tmp_id}")[0]
        hdfs.close()
        df = parquet_to_pandas(table_path)
    finally:
        # ROBUSTNESS: always drop the scratch table, even when the CTAS or the
        # read-back fails (previously a failure leaked the table).
        c.execute(f"DROP TABLE IF EXISTS dumbo.{tmp_id}")
        c.close()
        conn.close()
    return df


def parquet_to_pandas(hdfs_path):
    """Read parquet data at `hdfs_path` into a pandas DataFrame."""
    from pyarrow import parquet

    hdfs = get_hdfs_conn()
    try:
        df = parquet.read_table(hdfs_path, filesystem=hdfs).to_pandas()
    finally:
        # BUGFIX: close the HDFS connection even on failure (it was leaked before).
        hdfs.close()
    df.info()
    return df


def pandas_to_parquet(pandas_df, hdfs_path, spark):
    """Write `pandas_df` to `hdfs_path` as parquet via the given SparkSession."""
    df = spark.createDataFrame(pandas_df)
    df.write.mode("overwrite").parquet(hdfs_path)


def slack_send(
    text="This is default text",
    username="SKT",
    channel="#leavemealone",
    icon_emoji=":large_blue_circle:",
    blocks=None,
    dataframe=False,
):
    """Post a message to Slack through the corporate proxy.

    When `dataframe` is truthy, `text` is assumed to be a DataFrame and is
    rendered as a fixed-width table inside a code fence.
    Raises on HTTP errors and on a non-ok Slack API response.
    """
    import requests
    from skt.vault_utils import get_secrets

    if dataframe:
        from tabulate import tabulate

        text = "```" + tabulate(text, tablefmt="simple", headers="keys") + "```"
    token = get_secrets("slack")["bot_token"]["airflow"]
    proxy = get_secrets("proxy")["proxy"]
    proxies = {
        "http": proxy,
        "https": proxy,
    }
    headers = {
        "Content-Type": "application/json;charset=utf-8",
        "Authorization": f"Bearer {token}",
    }
    json_body = {
        "username": username,
        "channel": channel,
        "text": text,
        "blocks": blocks,
        "icon_emoji": icon_emoji,
    }
    r = requests.post("https://www.slack.com/api/chat.postMessage", proxies=proxies, headers=headers, json=json_body,)
    r.raise_for_status()
    if not r.json()["ok"]:
        raise Exception(r.json())


def get_github_util():
    """Return a GithubUtil configured with the Vault token and corporate proxy."""
    from skt.github_utils import GithubUtil

    github_token = get_secrets("github/sktaiflow")["token"]
    proxy = get_secrets("proxy")["proxy"]
    proxies = {
        "http": proxy,
        "https": proxy,
    }
    g = GithubUtil(github_token, proxies)
    return g


def _write_to_parquet_via_spark(pandas_df, hdfs_path):
    # Spark serializes datetime64 columns in a Hive-compatible way,
    # unlike a plain pyarrow write (see _write_to_parquet).
    spark = get_spark()
    spark_df = spark.createDataFrame(pandas_df)
    spark_df.write.mode("overwrite").parquet(hdfs_path)


def _write_to_parquet(pandas_df, hdfs_path):
    """Write `pandas_df` as parquet to `hdfs_path`; route via Spark when timestamps are present."""
    import pyarrow as pa
    import pyarrow.parquet as pq

    # Read Parquet INT64 timestamp issue:
    # https://issues.apache.org/jira/browse/HIVE-21215
    if "datetime64[ns]" in pandas_df.dtypes.tolist():
        _write_to_parquet_via_spark(pandas_df, hdfs_path)
        return
    pa_table = pa.Table.from_pandas(pandas_df)
    hdfs_conn = get_hdfs_conn()
    try:
        pq.write_to_dataset(pa_table, root_path=hdfs_path, filesystem=hdfs_conn)
    finally:
        hdfs_conn.close()


def _write_df(pandas_df, schema_name, table_name, hdfs_path, engine, cursor, tmp_table_name):
    """Create {schema_name}.{table_name} as a parquet table holding `pandas_df`.

    A scratch table is first created through SQLAlchemy only to derive the
    column schema; the data itself is written straight to HDFS as parquet.
    """
    import sqlalchemy.exc

    cursor.execute(f"drop table if exists {schema_name}.{tmp_table_name}")
    try:
        pandas_df.to_sql(tmp_table_name, engine, schema=schema_name, if_exists="replace", index=False)
    except sqlalchemy.exc.ProgrammingError:
        # Hive bulk insert issue -- CREATE succeeds but the INSERT step fails;
        # only the table schema is needed here anyway.
        # https://github.com/dropbox/PyHive/issues/343
        pass
    cursor.execute(f"drop table if exists {schema_name}.{table_name}")
    if hdfs_path is None:
        cursor.execute(
            f"""create table {schema_name}.{table_name}
            like {schema_name}.{tmp_table_name}
            stored as parquet"""
        )
        cursor.execute(f"show create table {schema_name}.{table_name}")
        result = cursor.fetchall()
        # The LOCATION row of `show create table` is quoted ('hdfs://...'),
        # so the URI starts at string index 1; strip the surrounding quotes.
        managed_hdfs_path = list(filter(lambda row: row[0].strip().find("hdfs://") == 1, result))[0][0].strip()[1:-1]
        _write_to_parquet(pandas_df, managed_hdfs_path)
    else:
        cursor.execute(
            f"""create external table {schema_name}.{table_name}
            like {schema_name}.{tmp_table_name}
            stored as parquet
            location '{hdfs_path}'"""
        )


def write_df_to_hive(pandas_df, schema_name, table_name, hdfs_path=None):
    """
    Exports a Pandas dataframe into a table in Hive.

    Example:
        write_df_to_hive(pandas_df1, "my_schema", "my_table1")
        write_df_to_hive(pandas_df2, "my_schema", "my_table2")
        write_df_to_hive(pandas_df1, "my_schema", "my_table3",
                         hdfs_path="hdfs://.../my_schema.db/my_table1")

    Parameters
    ----------
    pandas_df : an object of Pandas Dataframe
    schema_name : str
        A target schema name of Hive
    table_name : str
        A target table name of Hive
    hdfs_path : str, default None
        A path of Hadoop file system as an optional parameter.
        It will be used to create an external table.
        If hdfs_path is not None, data in the dataframe will not be converted.
        A metadata in the dataframe is just used to create a Hive table.
    """
    engine = get_sqlalchemy_engine()
    conn = get_hive_conn()
    cursor = conn.cursor()
    import hashlib

    # Deterministic scratch-table name derived from the target table.
    # NOTE(review): a sha1 hexdigest may begin with a digit -- confirm Hive
    # accepts such unquoted identifiers in this deployment.
    tmp_table_name = hashlib.sha1(str(f"{schema_name}.{table_name}").encode("utf-8")).hexdigest()
    try:
        _write_df(pandas_df, schema_name, table_name, hdfs_path, engine, cursor, tmp_table_name)
    finally:
        cursor.execute(f"drop table if exists {schema_name}.{tmp_table_name}")
        cursor.close()
        conn.close()
# NOTE(review): this entire module is a duplicate of the copy that appears
# earlier in this file -- presumably a concatenation artifact; consider
# deduplicating. Code below is unchanged; only documentation was added.
from skt.vault_utils import get_secrets


def get_hive_conn():
    """Open a PyHive connection to HiveServer2 using credentials from Vault ("ye/hiveserver2")."""
    from pyhive import hive
    hiveserver2 = get_secrets(path="ye/hiveserver2")
    host = hiveserver2["ip"]
    port = hiveserver2["port"]
    user = hiveserver2["user"]
    conn = hive.connect(host, port=port, username=user)
    return conn


def get_hdfs_conn():
    """Open a pyarrow HDFS connection as the `airflow` user."""
    import os
    import pyarrow
    # libhdfs shared object shipped with the HDP 3.0.1 distribution
    os.environ["ARROW_LIBHDFS_DIR"] = "/usr/hdp/3.0.1.0-187/usr/lib"
    # NOTE(review): pyarrow.hdfs.connect is deprecated in newer pyarrow --
    # confirm the pinned pyarrow version before upgrading.
    conn = pyarrow.hdfs.connect(user="airflow")
    return conn


def get_sqlalchemy_engine():
    """Build a SQLAlchemy engine for the `tmp` schema on HiveServer2 (credentials from Vault)."""
    from sqlalchemy import create_engine
    hiveserver2 = get_secrets(path="ye/hiveserver2")
    host = hiveserver2["ip"]
    port = hiveserver2["port"]
    user = hiveserver2["user"]
    return create_engine(f"hive://{user}@{host}:{port}/tmp")


def get_pkl_from_hdfs(pkl_path):
    """Fetch the file at `pkl_path` from HDFS and return the unpickled object."""
    import pickle
    conn = get_hdfs_conn()
    byte_object = conn.cat(f"{pkl_path}")
    pkl_object = pickle.loads(byte_object)
    return pkl_object


def get_spark(scale=0, queue=None):
    """Create a Hive-enabled SparkSession on YARN.

    scale 1-4 selects a fixed-size cluster scaled linearly with `scale`;
    any other value uses dynamic allocation. `queue` defaults to "dmig_eda"
    under JupyterHub, else "airflow_job".
    """
    import os
    import uuid
    import tempfile
    from pyspark.sql import SparkSession
    from skt.vault_utils import get_secrets
    tmp_uuid = str(uuid.uuid4())
    app_name = f"skt-{os.environ.get('USER', 'default')}-{tmp_uuid}"
    if not queue:
        if "JUPYTERHUB_USER" in os.environ:
            queue = "dmig_eda"
        else:
            queue = "airflow_job"
    os.environ["ARROW_PRE_0_15_IPC_FORMAT"] = "1"
    # Write the GCP service-account key to a temp file for the BigQuery connector.
    key = get_secrets("gcp/sktaic-datahub/dataflow")["config"]
    key_file_name = tempfile.mkstemp()[1]
    with open(key_file_name, "wb") as key_file:
        key_file.write(key.encode())
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = key_file.name
    if scale in [1, 2, 3, 4]:
        spark = (
            SparkSession.builder.config("spark.app.name", app_name)
            .config("spark.driver.memory", f"{scale*8}g")
            .config("spark.executor.memory", f"{scale*3}g")
            .config("spark.executor.instances", f"{scale*8}")
            .config("spark.driver.maxResultSize", f"{scale*4}g")
            .config("spark.rpc.message.maxSize", "1024")
            .config("spark.yarn.queue", queue)
            .config("spark.ui.enabled", "false")
            .config("spark.port.maxRetries", "128")
            .config("spark.executorEnv.ARROW_PRE_0_15_IPC_FORMAT", "1")
            .config("spark.yarn.appMasterEnv.ARROW_PRE_0_15_IPC_FORMAT", "1")
            .config("spark.jars", "gs://external_libs/spark/jars/spark-bigquery-with-dependencies_2.11-0.16.1.jar",)
            .enableHiveSupport()
            .getOrCreate()
        )
    else:
        spark = (
            SparkSession.builder.config("spark.app.name", app_name)
            .config("spark.driver.memory", "6g")
            .config("spark.executor.memory", "8g")
            .config("spark.shuffle.service.enabled", "true")
            .config("spark.dynamicAllocation.enabled", "true")
            .config("spark.dynamicAllocation.maxExecutors", "200")
            .config("spark.driver.maxResultSize", "6g")
            .config("spark.rpc.message.maxSize", "1024")
            .config("spark.yarn.queue", queue)
            .config("spark.ui.enabled", "false")
            .config("spark.port.maxRetries", "128")
            .config("spark.executorEnv.ARROW_PRE_0_15_IPC_FORMAT", "1")
            .config("spark.yarn.appMasterEnv.ARROW_PRE_0_15_IPC_FORMAT", "1")
            .config("spark.jars", "gs://external_libs/spark/jars/spark-bigquery-with-dependencies_2.11-0.16.1.jar",)
            .enableHiveSupport()
            .getOrCreate()
        )
    spark.conf.set("spark.sql.execution.arrow.enabled", "true")
    return spark


def hive_execute(query):
    """Run `query` on Hive, discarding any result."""
    conn = get_hive_conn()
    c = conn.cursor()
    c.execute(query)
    c.close()
    conn.close()


def hive_get_result(query):
    """Run `query` on Hive and return all rows (list of tuples)."""
    conn = get_hive_conn()
    c = conn.cursor()
    c.execute(query)
    result = c.fetchall()
    c.close()
    conn.close()
    return result


def hive_to_pandas(query, scale=0):
    """Run `query` on Hive and return the result as a pandas DataFrame.

    scale == 1 streams directly via pandas.read_sql; otherwise the query is
    materialized as a scratch parquet table in `dumbo` and read back via HDFS.
    """
    if scale == 1:
        import pandas
        conn = get_hive_conn()
        df = pandas.read_sql(query, conn)
        df.info()
        conn.close()
        return df

    import uuid
    tmp_id = str(uuid.uuid4()).replace("-", "_")
    ctas = f"CREATE TABLE dumbo.{tmp_id} stored as parquet as {query}"
    conn = get_hive_conn()
    c = conn.cursor()
    c.execute("set parquet.column.index.access=false")
    c.execute(ctas)
    hdfs = get_hdfs_conn()
    table_path = hdfs.ls(f"/warehouse/tablespace/managed/hive/dumbo.db/{tmp_id}")[0]
    hdfs.close()
    df = parquet_to_pandas(table_path)
    # NOTE(review): if anything above raises, the scratch table is leaked.
    c.execute(f"DROP TABLE dumbo.{tmp_id}")
    c.close()
    conn.close()
    return df


def parquet_to_pandas(hdfs_path):
    """Read parquet data at `hdfs_path` into a pandas DataFrame."""
    from pyarrow import parquet
    # NOTE(review): this connection is never closed -- potential leak.
    hdfs = get_hdfs_conn()
    df = parquet.read_table(hdfs_path, filesystem=hdfs).to_pandas()
    df.info()
    return df


def pandas_to_parquet(pandas_df, hdfs_path, spark):
    """Write `pandas_df` to `hdfs_path` as parquet via the given SparkSession."""
    df = spark.createDataFrame(pandas_df)
    df.write.mode("overwrite").parquet(hdfs_path)


def slack_send(
    text="This is default text",
    username="SKT",
    channel="#leavemealone",
    icon_emoji=":large_blue_circle:",
    blocks=None,
    dataframe=False,
):
    """Post a message to Slack through the corporate proxy.

    When `dataframe` is truthy, `text` is assumed to be a DataFrame and is
    rendered as a fixed-width table inside a code fence.
    Raises on HTTP errors and on a non-ok Slack API response.
    """
    import requests
    from skt.vault_utils import get_secrets
    if dataframe:
        from tabulate import tabulate
        text = "```" + tabulate(text, tablefmt="simple", headers="keys") + "```"
    token = get_secrets("slack")["bot_token"]["airflow"]
    proxy = get_secrets("proxy")["proxy"]
    proxies = {
        "http": proxy,
        "https": proxy,
    }
    headers = {
        "Content-Type": "application/json;charset=utf-8",
        "Authorization": f"Bearer {token}",
    }
    json_body = {
        "username": username,
        "channel": channel,
        "text": text,
        "blocks": blocks,
        "icon_emoji": icon_emoji,
    }
    r = requests.post("https://www.slack.com/api/chat.postMessage", proxies=proxies, headers=headers, json=json_body,)
    r.raise_for_status()
    if not r.json()["ok"]:
        raise Exception(r.json())


def get_github_util():
    """Return a GithubUtil configured with the Vault token and corporate proxy."""
    from skt.github_utils import GithubUtil
    github_token = get_secrets("github/sktaiflow")["token"]
    proxy = get_secrets("proxy")["proxy"]
    proxies = {
        "http": proxy,
        "https": proxy,
    }
    g = GithubUtil(github_token, proxies)
    return g


def _write_to_parquet_via_spark(pandas_df, hdfs_path):
    # Spark serializes datetime64 columns in a Hive-compatible way,
    # unlike a plain pyarrow write (see _write_to_parquet).
    spark = get_spark()
    spark_df = spark.createDataFrame(pandas_df)
    spark_df.write.mode("overwrite").parquet(hdfs_path)


def _write_to_parquet(pandas_df, hdfs_path):
    """Write `pandas_df` as parquet to `hdfs_path`; route via Spark when timestamps are present."""
    import pyarrow as pa
    import pyarrow.parquet as pq
    # Read Parquet INT64 timestamp issue:
    # https://issues.apache.org/jira/browse/HIVE-21215
    if "datetime64[ns]" in pandas_df.dtypes.tolist():
        _write_to_parquet_via_spark(pandas_df, hdfs_path)
        return
    pa_table = pa.Table.from_pandas(pandas_df)
    hdfs_conn = get_hdfs_conn()
    try:
        pq.write_to_dataset(pa_table, root_path=hdfs_path, filesystem=hdfs_conn)
    finally:
        hdfs_conn.close()


def _write_df(pandas_df, schema_name, table_name, hdfs_path, engine, cursor, tmp_table_name):
    """Create {schema_name}.{table_name} as a parquet table holding `pandas_df`.

    A scratch table is first created through SQLAlchemy only to derive the
    column schema; the data itself is written straight to HDFS as parquet.
    """
    import sqlalchemy.exc
    cursor.execute(f"drop table if exists {schema_name}.{tmp_table_name}")
    try:
        pandas_df.to_sql(tmp_table_name, engine, schema=schema_name, if_exists="replace", index=False)
    except sqlalchemy.exc.ProgrammingError:
        # Hive bulk insert issue -- CREATE succeeds but the INSERT step fails;
        # only the table schema is needed here anyway.
        # https://github.com/dropbox/PyHive/issues/343
        pass
    cursor.execute(f"drop table if exists {schema_name}.{table_name}")
    if hdfs_path is None:
        cursor.execute(
            f"""create table {schema_name}.{table_name}
            like {schema_name}.{tmp_table_name}
            stored as parquet"""
        )
        cursor.execute(f"show create table {schema_name}.{table_name}")
        result = cursor.fetchall()
        # The LOCATION row of `show create table` is quoted ('hdfs://...'),
        # so the URI starts at string index 1; strip the surrounding quotes.
        managed_hdfs_path = list(filter(lambda row: row[0].strip().find("hdfs://") == 1, result))[0][0].strip()[1:-1]
        _write_to_parquet(pandas_df, managed_hdfs_path)
    else:
        cursor.execute(
            f"""create external table {schema_name}.{table_name}
            like {schema_name}.{tmp_table_name}
            stored as parquet
            location '{hdfs_path}'"""
        )


def write_df_to_hive(pandas_df, schema_name, table_name, hdfs_path=None):
    """
    Exports a Pandas dataframe into a table in Hive.

    Example:
        write_df_to_hive(pandas_df1, "my_schema", "my_table1")
        write_df_to_hive(pandas_df2, "my_schema", "my_table2")
        write_df_to_hive(pandas_df1, "my_schema", "my_table3",
                         hdfs_path="hdfs://.../my_schema.db/my_table1")

    Parameters
    ----------
    pandas_df : an object of Pandas Dataframe
    schema_name : str
        A target schema name of Hive
    table_name : str
        A target table name of Hive
    hdfs_path : str, default None
        A path of Hadoop file system as an optional parameter.
        It will be used to create an external table.
        If hdfs_path is not None, data in the dataframe will not be converted.
        A metadata in the dataframe is just used to create a Hive table.
    """
    engine = get_sqlalchemy_engine()
    conn = get_hive_conn()
    cursor = conn.cursor()
    import hashlib
    # Deterministic scratch-table name derived from the target table.
    tmp_table_name = hashlib.sha1(str(f"{schema_name}.{table_name}").encode("utf-8")).hexdigest()
    try:
        _write_df(pandas_df, schema_name, table_name, hdfs_path, engine, cursor, tmp_table_name)
    finally:
        cursor.execute(f"drop table if exists {schema_name}.{tmp_table_name}")
        cursor.close()
        conn.close()
# Copyright (c) 2018 getcarrier.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import tempfile
import zipfile
from copy import deepcopy
from json import loads, dumps
from os import environ, path
from celery import Celery, chord
from celery.contrib.abortable import AbortableAsyncResult
from time import sleep, time
from uuid import uuid4
import re
from datetime import datetime
import requests
import sys

# --- Runtime configuration, all taken from the environment -------------------
REDIS_USER = environ.get('REDIS_USER', '')
REDIS_PASSWORD = environ.get('REDIS_PASSWORD', 'password')
REDIS_HOST = environ.get('REDIS_HOST', 'localhost')
REDIS_PORT = environ.get('REDIS_PORT', '6379')
REDIS_DB = environ.get('REDIS_DB', 1)
GALLOPER_WEB_HOOK = environ.get('GALLOPER_WEB_HOOK', None)
LOKI_HOST = environ.get('loki_host', None)
LOKI_PORT = environ.get('loki_port', '3100')
GALLOPER_URL = environ.get('galloper_url', None)
PROJECT_ID = environ.get('project_id', None)
BUCKET = environ.get('bucket', None)
TEST = environ.get('artifact', None)
ADDITIONAL_FILES = environ.get('additional_files', None)
BUILD_ID = environ.get('build_id', f'build_{uuid4()}')
DISTRIBUTED_MODE_PREFIX = environ.get('PREFIX', f'test_results_{uuid4()}_')
JVM_ARGS = environ.get('JVM_ARGS', None)
TOKEN = environ.get('token', None)
mounts = environ.get('mounts', None)
release_id = environ.get('release_id', None)
app = None  # Celery app, initialized lazily by connect_to_celery()
SAMPLER = environ.get('sampler', "REQUEST")
REQUEST = environ.get('request', "All")
CALCULATION_DELAY = environ.get('data_wait', 300)
CHECK_SATURATION = environ.get('check_saturation', None)
MAX_ERRORS = environ.get('error_rate', 100)
DEVIATION = environ.get('dev', 0.02)
MAX_DEVIATION = environ.get('max_dev', 0.05)
U_AGGR = environ.get('u_aggr', 1)
KILL_MAX_WAIT_TIME = 10

# CLI job type -> load-generator family used when resolving test configs.
JOB_TYPE_MAPPING = {
    "perfmeter": "jmeter",
    "perfgun": "gatling",
    "free_style": "other",
    "observer": "observer",
    "dast": "dast",
    "sast": "sast",
}

# Resource limits per subscription package (-1 = unlimited).
PROJECT_PACKAGE_MAPPER = {
    "basic": {"duration": 1800, "load_generators": 1},
    "startup": {"duration": 7200, "load_generators": 5},
    "professional": {"duration": 28800, "load_generators": 10},
    "enterprise": {"duration": -1, "load_generators": -1},
    "custom": {"duration": -1, "load_generators": -1},  # need to set custom values?
}

# Galloper cc_env_vars key -> module-level global to override.
ENV_VARS_MAPPING = {
    "REDIS_USER": "REDIS_USER",
    "REDIS_PASSWORD": "REDIS_PASSWORD",
    "REDIS_HOST": "REDIS_HOST",
    "REDIS_PORT": "REDIS_PORT",
    "REDIS_DB": "REDIS_DB",
    "GALLOPER_WEB_HOOK": "GALLOPER_WEB_HOOK",
    "LOKI_PORT": "LOKI_PORT",
    "mounts": "mounts",
    "release_id": "release_id",
    "sampler": "SAMPLER",
    "request": "REQUEST",
    "data_wait": "CALCULATION_DELAY",
    "check_saturation": "CHECK_SATURATION",
    "error_rate": "MAX_ERRORS",
    "dev": "DEVIATION",
    "max_dev": "MAX_DEVIATION",
    "galloper_url": "GALLOPER_URL",
    "token": "TOKEN",
    "project_id": "PROJECT_ID",
    "bucket": "BUCKET",
    "u_aggr": "U_AGGR"
}


def str2bool(v):
    """argparse `type=` helper: parse common textual booleans.

    Raises argparse.ArgumentTypeError on anything unrecognized.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')


def str2json(v):
    """argparse `type=` helper: parse a JSON document.

    Raises argparse.ArgumentTypeError on malformed input so argparse prints a
    clean usage error instead of a traceback.
    """
    try:
        return loads(v)
    # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; json.loads only raises ValueError/TypeError here.
    except (ValueError, TypeError) as exc:
        raise argparse.ArgumentTypeError('Json is not properly formatted.') from exc


def arg_parse():
    """Parse the command-center CLI; when a test_id is given, hydrate the
    arguments from the Galloper test configuration (append_test_config)."""
    parser = argparse.ArgumentParser(description='Carrier Command Center')
    parser.add_argument('-c', '--container', action="append", type=str, default=[],
                        help="Name of container to run the job e.g. "
                             "getcarrier/dusty:latest")
    parser.add_argument('-e', '--execution_params', action="append", type=str2json, default=[],
                        help="Execution params for jobs e.g. \n"
                             "{\n\t'host': 'localhost', \n\t'port':'443', \n\t'protocol':'https'"
                             ", \n\t'project_name':'MY_PET', \n\t'environment':'stag', \n\t"
                             "'test_type': 'basic'"
                             "\n} will be valid for dast container")
    parser.add_argument('-t', '--job_type', action="append", type=str, default=[],
                        help="Type of a job: e.g. sast, dast, perfmeter, perfgun, perf-ui")
    parser.add_argument('-n', '--job_name', type=str, default="",
                        help="Name of a job (e.g. unique job ID, like %JOBNAME%_%JOBID%)")
    parser.add_argument('-q', '--concurrency', action="append", type=int, default=[],
                        help="Number of parallel workers to run the job")
    parser.add_argument('-r', '--channel', action="append", default=[], type=int,
                        help="Number of parallel workers to run the job")
    parser.add_argument('-a', '--artifact', default="", type=str)
    parser.add_argument('-b', '--bucket', default="", type=str)
    parser.add_argument('-sr', '--save_reports', default=False, type=str2bool)
    parser.add_argument('-j', '--junit', default=False, type=str2bool)
    parser.add_argument('-qg', '--quality_gate', default=False, type=str2bool)
    parser.add_argument('-jr', '--jira', default=False, type=str2bool)
    parser.add_argument('-eml', '--email', default=False, type=str2bool)
    parser.add_argument('-el', '--email_recipients', default="", type=str)
    parser.add_argument('-rp', '--report_portal', default=False, type=str2bool)
    parser.add_argument('-ado', '--azure_devops', default=False, type=str2bool)
    parser.add_argument('-p', '--report_path', default="/tmp/reports", type=str)
    parser.add_argument('-d', '--deviation', default=0, type=float)
    parser.add_argument('-md', '--max_deviation', default=0, type=float)
    parser.add_argument('-tid', '--test_id', default="", type=str)
    args, _ = parser.parse_known_args()
    if args.test_id and GALLOPER_URL:
        args = append_test_config(args)
    return args


def append_test_config(args):
    """Merge the Galloper-stored test configuration for args.test_id into `args`.

    Queries the Galloper API per test, fills in execution params, concurrency,
    container and job type, and overrides module globals from cc_env_vars.
    """
    headers = {'content-type': 'application/json'}
    if TOKEN:
        headers['Authorization'] = f'bearer {TOKEN}'
    url = f"{GALLOPER_URL}/api/v1/tests/{PROJECT_ID}/{args.test_id}"
    # get job_type
    test_config = requests.get(url, headers=headers).json()
    job_type = args.job_type[0] if args.job_type else test_config["job_type"]
    lg_type = JOB_TYPE_MAPPING.get(job_type, "other")
    params = {}
    execution_params = []
    concurrency = []
    container = []
    job_type = []
    tests_count = len(args.execution_params) if args.execution_params else 1
    # prepare params
    for i in range(tests_count):
        if lg_type == 'jmeter':
            url = f"{GALLOPER_URL}/api/v1/tests/{PROJECT_ID}/backend/{args.test_id}"
            # Pull -J<key>=<value> overrides out of the raw jmeter command line.
            if args.execution_params and "cmd" in args.execution_params[i].keys():
                exec_params = args.execution_params[i]['cmd'].split("-J")
                for each in exec_params:
                    if "=" in each:
                        _ = each.split("=")
                        params[_[0]] = str(_[1]).strip()
        elif lg_type == 'gatling':
            url = f"{GALLOPER_URL}/api/v1/tests/{PROJECT_ID}/backend/{args.test_id}"
            # Pull -D<key>=<value> overrides out of GATLING_TEST_PARAMS.
            if args.execution_params and "GATLING_TEST_PARAMS" in args.execution_params[i].keys():
                exec_params = args.execution_params[i]['GATLING_TEST_PARAMS'].split("-D")
                for each in exec_params:
                    if "=" in each:
                        _ = each.split("=")
                        params[_[0]] = str(_[1]).strip()
        elif lg_type == 'observer':
            url = f"{GALLOPER_URL}/api/v1/tests/{PROJECT_ID}/frontend/{args.test_id}"
        elif lg_type == 'dast':
            url = f"{GALLOPER_URL}/api/v1/tests/{PROJECT_ID}/dast/{args.test_id}"
        elif lg_type == 'sast':
            url = f"{GALLOPER_URL}/api/v1/tests/{PROJECT_ID}/sast/{args.test_id}"
        else:
            print(f"No data found for test_id={args.test_id}")
            exit(1)
        data = {
            "parallel": args.concurrency[i] if args.concurrency else None,
            "params": dumps(params),
            "emails": args.email_recipients if args.email_recipients else "",
            "type": "config"
        }
        # merge params with test config
        test_config = requests.post(url, json=data, headers=headers).json()
        # set args end env vars
        execution_params.append(loads(test_config["execution_params"]))
concurrency.append(test_config["concurrency"]) container.append(test_config["container"]) job_type.append(test_config["job_type"]) for each in ["artifact", "bucket", "job_name", "email_recipients"]: if not getattr(args, each) and each in test_config.keys(): setattr(args, each, test_config[each]) for each in ["container", "job_type"]: if not getattr(args, each) and each in test_config.keys(): setattr(args, each, [test_config[each]]) for each in ["junit", "quality_gate", "save_reports", "jira", "report_portal", "email", "azure_devops"]: if not getattr(args, each) and each in test_config.keys(): setattr(args, each, str2bool(test_config[each])) env_vars = test_config["cc_env_vars"] for key, value in env_vars.items(): if not environ.get(key, None): globals()[ENV_VARS_MAPPING.get(key)] = value setattr(args, "execution_params", execution_params) setattr(args, "concurrency", concurrency) setattr(args, "container", container) setattr(args, "job_type", job_type) if "git" in test_config.keys(): from control_tower.git_clone import clone_repo, post_artifact git_setting = test_config["git"] clone_repo(git_setting) post_artifact(GALLOPER_URL, TOKEN, PROJECT_ID) setattr(args, "artifact", "tests_from_git_repo.zip") setattr(args, "bucket", "tests") return args def parse_id(): parser = argparse.ArgumentParser(description='Carrier Command Center') parser.add_argument('-g', '--groupid', type=str, default="", help="ID of the group for a task") parser.add_argument('-c', '--container', type=str, help="Name of container to run the job " "e.g. getcarrier/dusty:latest") parser.add_argument('-t', '--job_type', type=str, help="Type of a job: e.g. sast, dast, perf-jmeter, perf-ui") parser.add_argument('-n', '--job_name', type=str, help="Name of a job (e.g. 
unique job ID, like %JOBNAME%_%JOBID%)") args, _ = parser.parse_known_args() if args.groupid: for unparsed in _: args.groupid = args.groupid + unparsed if 'group_id' in args.groupid: args.groupid = loads(args.groupid) return args def connect_to_celery(concurrency, redis_db=None, retry=5): if not (redis_db and isinstance(redis_db, int)): redis_db = REDIS_DB app = Celery('CarrierExecutor', broker=f'redis://{REDIS_USER}:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/{redis_db}', backend=f'redis://{REDIS_USER}:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/{redis_db}', include=['celery']) app.conf.update(broker_transport_options={'max_retries': 3}) try: if not app.control.inspect().stats() and retry != 0: print("retry") retry -= 1 sleep(60) return connect_to_celery(concurrency, redis_db=redis_db, retry=retry) except: print("Invalid REDIS password") exit(1) if concurrency: workers = sum(value['pool']['max-concurrency'] for key, value in app.control.inspect().stats().items()) active = sum(len(value) for key, value in app.control.inspect().active().items()) available = workers - active print(f"Total Workers: {workers}") print(f"Available Workers: {available}") if workers < concurrency: print(f"We are unable to process your request due to limited resources. 
We have {workers} available") exit(1) return app def start_job(args=None): if not args: args = arg_parse() if GALLOPER_URL and PROJECT_ID and TOKEN: package = get_project_package() allowable_load_generators = PROJECT_PACKAGE_MAPPER.get(package)["load_generators"] for each in args.concurrency: if allowable_load_generators != -1 and allowable_load_generators < each: print(f"Only {allowable_load_generators} parallel load generators allowable for {package} package.") exit(0) concurrency_cluster = {} channels = args.channel if not channels: for _ in args.container: channels.append(REDIS_DB) for index in range(len(channels)): if str(channels[index]) not in concurrency_cluster: concurrency_cluster[str(channels[index])] = 0 concurrency_cluster[str(channels[index])] += args.concurrency[index] celery_connection_cluster = {} results_bucket = str(args.job_name).replace("_", "").replace(" ", "").lower() integration = [] for each in ["jira", "report_portal", "email", "azure_devops"]: if getattr(args, each): integration.append(each) post_processor_args = { "galloper_url": GALLOPER_URL, "project_id": PROJECT_ID, "galloper_web_hook": GALLOPER_WEB_HOOK, "bucket": results_bucket, "prefix": DISTRIBUTED_MODE_PREFIX, "junit": args.junit, "token": TOKEN, "integration": integration, "email_recipients": args.email_recipients } for channel in channels: if str(channel) not in celery_connection_cluster: celery_connection_cluster[str(channel)] = {} celery_connection_cluster[str(channel)]['app'] = connect_to_celery(concurrency_cluster[str(channel)], channel) celery_connection_cluster[str(channel)]['post_processor'] = \ celery_connection_cluster[str(channel)]['app'].signature('tasks.post_process', kwargs=post_processor_args) job_type = "".join(args.container) job_type += "".join(args.job_type) for i in range(len(args.container)): if 'tasks' not in celery_connection_cluster[str(channels[i])]: celery_connection_cluster[str(channels[i])]['tasks'] = [] exec_params = 
deepcopy(args.execution_params[i]) if mounts: exec_params['mounts'] = mounts if args.job_type[i] in ['perfgun', 'perfmeter']: if path.exists('/tmp/config.yaml'): with open('/tmp/config.yaml', 'r') as f: config_yaml = f.read() exec_params['config_yaml'] = dumps(config_yaml) else: exec_params['config_yaml'] = {} if LOKI_HOST: exec_params['loki_host'] = LOKI_HOST exec_params['loki_port'] = LOKI_PORT if ADDITIONAL_FILES: exec_params['additional_files'] = ADDITIONAL_FILES if JVM_ARGS: exec_params['JVM_ARGS'] = JVM_ARGS if 'additional_files' in exec_params: exec_params['additional_files'] = dumps(exec_params['additional_files']).replace("'", "\"") exec_params['build_id'] = BUILD_ID exec_params['DISTRIBUTED_MODE_PREFIX'] = DISTRIBUTED_MODE_PREFIX exec_params['galloper_url'] = GALLOPER_URL exec_params['bucket'] = BUCKET if not args.bucket else args.bucket exec_params['artifact'] = TEST if not args.artifact else args.artifact exec_params['results_bucket'] = results_bucket exec_params['save_reports'] = args.save_reports if PROJECT_ID: exec_params['project_id'] = PROJECT_ID if TOKEN: exec_params['token'] = TOKEN elif args.job_type[i] == "observer": execution_params = args.execution_params[i] exec_params["GALLOPER_URL"] = GALLOPER_URL exec_params["REPORTS_BUCKET"] = BUCKET exec_params["RESULTS_BUCKET"] = results_bucket exec_params["RESULTS_REPORT_NAME"] = DISTRIBUTED_MODE_PREFIX exec_params["GALLOPER_PROJECT_ID"] = PROJECT_ID exec_params["JOB_NAME"] = args.job_name if TOKEN: exec_params['token'] = TOKEN if mounts: exec_params['mounts'] = mounts if not execution_params["mounts"] else execution_params[ "mounts"] elif args.job_type[i] == "sast": if "code_path" in exec_params: print("Uploading code artifact to Galloper ...") with tempfile.TemporaryFile() as src_file: with zipfile.ZipFile(src_file, "w", zipfile.ZIP_DEFLATED) as zip_file: src_dir = os.path.abspath("/code") for dirpath, _, filenames in os.walk(src_dir): if dirpath == src_dir: rel_dir = "" else: rel_dir = 
os.path.relpath(dirpath, src_dir) zip_file.write(dirpath, arcname=rel_dir) for filename in filenames: zip_file.write( os.path.join(dirpath, filename), arcname=os.path.join(rel_dir, filename) ) src_file.seek(0) headers = { "Authorization": f"Bearer {TOKEN}" } url = f"{GALLOPER_URL}/api/v1/artifacts/{PROJECT_ID}/sast/{args.test_id}.zip" requests.post( url, headers=headers, files={ "file": (f"{args.test_id}.zip", src_file) } ) for _ in range(int(args.concurrency[i])): task_kwargs = {'job_type': str(args.job_type[i]), 'container': args.container[i], 'execution_params': exec_params, 'redis_connection': '', 'job_name': args.job_name} celery_connection_cluster[str(channels[i])]['tasks'].append( celery_connection_cluster[str(channels[i])]['app'].signature('tasks.execute', kwargs=task_kwargs)) test_details = test_start_notify(args) groups = [] for each in celery_connection_cluster: task_group = chord( celery_connection_cluster[each]['tasks'], app=celery_connection_cluster[each]['app'])( celery_connection_cluster[each]['post_processor']) groups.append(task_group) return groups, test_details def get_project_package(): try: url = f"{GALLOPER_URL}/api/v1/project/{PROJECT_ID}" headers = {'content-type': 'application/json', 'Authorization': f'bearer {TOKEN}'} package = requests.get(url, headers=headers).json()["package"] except: package = "custom" return package def test_start_notify(args): if GALLOPER_URL: users_count = 0 duration = 0 vusers_var_names = ["vusers", "users", "users_count", "ramp_users", "user_count"] lg_type = JOB_TYPE_MAPPING.get(args.job_type[0], "other") tests_count = len(args.execution_params) if args.execution_params else 1 if lg_type == 'jmeter': for i in range(tests_count): exec_params = args.execution_params[i]['cmd'] + " " test_type = re.findall('-Jtest.type=(.+?) ', exec_params) test_type = test_type[0] if len(test_type) else 'demo' environment = re.findall("-Jenv.type=(.+?) 
", exec_params) environment = environment[0] if len(environment) else 'demo' test_name = re.findall("-Jtest_name=(.+?) ", exec_params) test_name = test_name[0] if len(test_name) else 'test' duration = re.findall("-JDURATION=(.+?) ", exec_params) duration = float(duration[0]) if len(duration) else 0 for each in vusers_var_names: if f'-j{each}' in exec_params.lower(): pattern = f'-j{each}=(.+?) ' vusers = re.findall(pattern, exec_params.lower()) users_count += int(vusers[0]) * args.concurrency[i] break elif lg_type == 'gatling': for i in range(tests_count): exec_params = args.execution_params[i] test_type = exec_params['test_type'] if exec_params.get('test_type') else 'demo' test_name = exec_params['test'].split(".")[1].lower() if exec_params.get('test') else 'test' environment = exec_params['env'] if exec_params.get('env') else 'demo' if exec_params.get('GATLING_TEST_PARAMS'): if '-dduration' in exec_params['GATLING_TEST_PARAMS'].lower(): duration = re.findall("-dduration=(.+?) ", exec_params['GATLING_TEST_PARAMS'].lower())[0] for each in vusers_var_names: if f'-d{each}' in exec_params['GATLING_TEST_PARAMS'].lower(): pattern = f'-d{each}=(.+?) 
' vusers = re.findall(pattern, exec_params['GATLING_TEST_PARAMS'].lower()) users_count += int(vusers[0]) * args.concurrency[i] break else: return {} start_time = datetime.utcnow().isoformat("T") + "Z" data = {'build_id': BUILD_ID, 'test_name': test_name, 'lg_type': lg_type, 'type': test_type, 'duration': duration, 'vusers': users_count, 'environment': environment, 'start_time': start_time, 'missed': 0, 'status': 'In progress'} if release_id: data['release_id'] = release_id headers = {'content-type': 'application/json'} if TOKEN: headers['Authorization'] = f'bearer {TOKEN}' if PROJECT_ID: url = f'{GALLOPER_URL}/api/v1/reports/{PROJECT_ID}' else: url = f'{GALLOPER_URL}/api/report' res = requests.post(url, json=data, headers=headers).json() if res.get('Forbidden', None): print(f"Forbidden: {res.get("Forbidden")}") exit(0) return res return {} def start_job_exec(args=None): start_job(args) exit(0) def check_ready(result): if result and not result.ready(): return False return True def check_test_is_saturating(test_id=None, deviation=0.02, max_deviation=0.05): if test_id and PROJECT_ID and SAMPLER and REQUEST: url = f'{GALLOPER_URL}/api/v1/saturation' headers = {'Authorization': f'bearer {TOKEN}'} if TOKEN else {} headers["Content-type"] = "application/json" params = { "test_id": test_id, "project_id": PROJECT_ID, "sampler": SAMPLER, "request": REQUEST, "wait_till": CALCULATION_DELAY, "max_errors": MAX_ERRORS, "deviation": deviation, "max_deviation": max_deviation, "u_aggr": U_AGGR } return requests.get(url, params=params, headers=headers).json() return {"message": "Test is in progress", "code": 0} # TODO check for lost connection and retry def track_job(group, test_id=None, deviation=0.02, max_deviation=0.05): result = 0 test_start = time() max_duration = -1 if GALLOPER_URL and PROJECT_ID and TOKEN: package = get_project_package() max_duration = PROJECT_PACKAGE_MAPPER.get(package)["duration"] while not group.ready(): sleep(60) if CHECK_SATURATION: test_status = 
check_test_is_saturating(test_id, deviation, max_deviation) print(test_status) if test_status.get("code", 0) == 1: kill_job(group) result = 1 else: print("Still processing ...") if test_was_canceled(test_id) and result != 1: print("Test was canceled") kill_job(group) result = 1 if max_duration != -1 and max_duration <= int((time() - test_start)) and result != 1: print(f"Exceeded max test duration - {max_duration} sec") kill_job(group) if group.successful(): print("We are done successfully") else: print("We are failed badly") group.forget() return result def test_was_canceled(test_id): try: if test_id and PROJECT_ID and GALLOPER_URL: url = f'{GALLOPER_URL}/api/v1/reports/{PROJECT_ID}/{test_id}/status' headers = {'Authorization': f'bearer {TOKEN}'} if TOKEN else {} headers["Content-type"] = "application/json" status = requests.get(url, headers=headers).json()['message'] return True if status == "Canceled" else False return False except: return False def _start_and_track(args=None): if not args: args = arg_parse() deviation = DEVIATION if args.deviation == 0 else args.deviation max_deviation = MAX_DEVIATION if args.max_deviation == 0 else args.max_deviation groups, test_details = start_job(args) print("Job started, waiting for containers to settle ... 
") for group in groups: track_job(group, test_details.get("id", None), deviation, max_deviation) if args.junit: print("Processing junit report ...") process_junit_report(args) if args.job_type[0] in ["dast", "sast"] and args.quality_gate: print("Processing security quality gate ...") process_security_quality_gate(args) def start_and_track(args=None): _start_and_track(args) exit(0) def process_security_quality_gate(args): # Save jUnit report as file to local filesystem junit_report_data = download_junit_report( args.job_type[0], f"{args.test_id}_junit_report.xml", retry=12 ) if junit_report_data: with open(os.path.join(args.report_path, f"junit_report_{args.test_id}.xml"), "w") as rept: rept.write(junit_report_data.text) # Quality Gate quality_gate_data = download_junit_report( args.job_type[0], f"{args.test_id}_quality_gate_report.json", retry=12 ) if not quality_gate_data: print("No security quality gate data found") return quality_gate = loads(quality_gate_data.text) if quality_gate["quality_gate_stats"]: for line in quality_gate["quality_gate_stats"]: print(line) if quality_gate["fail_quality_gate"]: exit(1) def process_junit_report(args): file_name = "junit_report_{}.xml".format(DISTRIBUTED_MODE_PREFIX) results_bucket = str(args.job_name).replace("_", "").lower() junit_report = download_junit_report(results_bucket, file_name, retry=12) if junit_report: with open("{}/{}".format(args.report_path, file_name), "w") as f: f.write(junit_report.text) failed = int(re.findall("testsuites .+? failures=\"(.+?)\"", junit_report.text)[0]) total = int(re.findall("testsuites .+? tests=\"(.+?)\"", junit_report.text)[0]) errors = int(re.findall("testsuites .+? errors=\"(.+?)\"", junit_report.text)[0]) skipped = int(re.findall("testsuite .+? 
skipped=\"(.+?)\"", junit_report.text)[0]) print("**********************************************") print("* Performance testing jUnit report | Carrier *") print("**********************************************") print(f"Tests run: {total}, Failures: {failed}, Errors: {errors}, Skipped: {skipped}") if args.quality_gate: rate = round(float(failed / total) * 100, 2) if total != 0 else 0 if rate > 20: print("Missed threshold rate is {}".format(rate), file=sys.stderr) exit(1) def download_junit_report(results_bucket, file_name, retry): if PROJECT_ID: url = f'{GALLOPER_URL}/api/v1/artifacts/{PROJECT_ID}/{results_bucket}/{file_name}' else: url = f'{GALLOPER_URL}/artifacts/{results_bucket}/{file_name}' headers = {'Authorization': f'bearer {TOKEN}'} if TOKEN else {} junit_report = requests.get(url, headers=headers, allow_redirects=True) if junit_report.status_code != 200 or 'botocore.errorfactory.NoSuchKey' in junit_report.text: print("Waiting for report to be accessible ...") retry -= 1 if retry == 0: return None sleep(10) return download_junit_report(results_bucket, file_name, retry) return junit_report def kill_job(group): abbortables = [] _app = group.app if not group.ready(): for task in group.parent.children: abortable = AbortableAsyncResult(id=task.task_id, app=_app) abortable.abort() abbortables.append(abortable) for _ in range(KILL_MAX_WAIT_TIME): if all(task.result for task in abbortables): break sleep(60) print("Aborting distributed tasks ... 
") return 0 # if __name__ == "__main__": # from control_tower.config_mock import BulkConfig # args = BulkConfig( # bulk_container=["getcarrier/perfmeter:latest"], # bulk_params=[{"cmd": "-n -t /mnt/jmeter/FloodIO.jmx -Jtest.type=debug -Jenv.type=debug " # "-Jinflux.host= -JVUSERS=100 -JDURATION=1200 " # "-JRAMP_UP=60 -Jtest_name=Flood"}], # job_type=["perfmeter"], # job_name='DemoTest', # bulk_concurrency=[2] # ) # groups, test_details, post_processor_args = start_job(args) # for group in groups: # track_job(group, test_details["id"])
# Copyright (c) 2018 getcarrier.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import tempfile
import zipfile
from copy import deepcopy
from json import loads, dumps
from os import environ, path
from celery import Celery, chord
from celery.contrib.abortable import AbortableAsyncResult
from time import sleep, time
from uuid import uuid4
import re
from datetime import datetime
import requests
import sys

# --- Runtime configuration, sourced from environment variables ---
REDIS_USER = environ.get('REDIS_USER', '')
REDIS_PASSWORD = environ.get('REDIS_PASSWORD', 'password')
REDIS_HOST = environ.get('REDIS_HOST', 'localhost')
REDIS_PORT = environ.get('REDIS_PORT', '6379')
# NOTE(review): env value is a str while the default is int 1; connect_to_celery's
# isinstance(redis_db, int) check treats these differently — confirm intended.
REDIS_DB = environ.get('REDIS_DB', 1)
GALLOPER_WEB_HOOK = environ.get('GALLOPER_WEB_HOOK', None)
LOKI_HOST = environ.get('loki_host', None)
LOKI_PORT = environ.get('loki_port', '3100')
GALLOPER_URL = environ.get('galloper_url', None)
PROJECT_ID = environ.get('project_id', None)
BUCKET = environ.get('bucket', None)
TEST = environ.get('artifact', None)
ADDITIONAL_FILES = environ.get('additional_files', None)
BUILD_ID = environ.get('build_id', f'build_{uuid4()}')
DISTRIBUTED_MODE_PREFIX = environ.get('PREFIX', f'test_results_{uuid4()}_')
JVM_ARGS = environ.get('JVM_ARGS', None)
TOKEN = environ.get('token', None)
mounts = environ.get('mounts', None)
release_id = environ.get('release_id', None)
app = None
# Saturation-check tuning parameters.
SAMPLER = environ.get('sampler', "REQUEST")
REQUEST = environ.get('request', "All")
CALCULATION_DELAY = environ.get('data_wait', 300)
CHECK_SATURATION = environ.get('check_saturation', None)
MAX_ERRORS = environ.get('error_rate', 100)
DEVIATION = environ.get('dev', 0.02)
MAX_DEVIATION = environ.get('max_dev', 0.05)
U_AGGR = environ.get('u_aggr', 1)
# Minutes to wait for aborted tasks to acknowledge in kill_job.
KILL_MAX_WAIT_TIME = 10

# Maps CLI job_type to load-generator family.
JOB_TYPE_MAPPING = {
    "perfmeter": "jmeter",
    "perfgun": "gatling",
    "free_style": "other",
    "observer": "observer",
    "dast": "dast",
    "sast": "sast",
}

# Per-package limits: max test duration (sec) and parallel load generators
# (-1 means unlimited).
PROJECT_PACKAGE_MAPPER = {
    "basic": {"duration": 1800, "load_generators": 1},
    "startup": {"duration": 7200, "load_generators": 5},
    "professional": {"duration": 28800, "load_generators": 10},
    "enterprise": {"duration": -1, "load_generators": -1},
    "custom": {"duration": -1, "load_generators": -1},  # need to set custom values?
}

# Maps incoming config env-var names to this module's global names
# (used by append_test_config to override globals).
ENV_VARS_MAPPING = {
    "REDIS_USER": "REDIS_USER",
    "REDIS_PASSWORD": "REDIS_PASSWORD",
    "REDIS_HOST": "REDIS_HOST",
    "REDIS_PORT": "REDIS_PORT",
    "REDIS_DB": "REDIS_DB",
    "GALLOPER_WEB_HOOK": "GALLOPER_WEB_HOOK",
    "LOKI_PORT": "LOKI_PORT",
    "mounts": "mounts",
    "release_id": "release_id",
    "sampler": "SAMPLER",
    "request": "REQUEST",
    "data_wait": "CALCULATION_DELAY",
    "check_saturation": "CHECK_SATURATION",
    "error_rate": "MAX_ERRORS",
    "dev": "DEVIATION",
    "max_dev": "MAX_DEVIATION",
    "galloper_url": "GALLOPER_URL",
    "token": "TOKEN",
    "project_id": "PROJECT_ID",
    "bucket": "BUCKET",
    "u_aggr": "U_AGGR"
}


def str2bool(v):
    """Argparse type: parse common truthy/falsy strings into a bool."""
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')


def str2json(v):
    """Argparse type: parse a JSON string, raising an argparse error when invalid."""
    try:
        return loads(v)
    except:
        raise argparse.ArgumentTypeError('Json is not properly formatted.')


def arg_parse():
    """Parse the full command-center CLI; merge in the server-side test config
    when a test_id and Galloper URL are available."""
    parser = argparse.ArgumentParser(description='Carrier Command Center')
    parser.add_argument('-c', '--container', action="append", type=str, default=[],
                        help="Name of container to run the job e.g. getcarrier/dusty:latest")
    parser.add_argument('-e', '--execution_params', action="append", type=str2json, default=[],
                        help="Execution params for jobs e.g. \n"
                             "{\n\t'host': 'localhost', \n\t'port':'443', \n\t'protocol':'https'"
                             ", \n\t'project_name':'MY_PET', \n\t'environment':'stag', \n\t"
                             "'test_type': 'basic'"
                             "\n} will be valid for dast container")
    parser.add_argument('-t', '--job_type', action="append", type=str, default=[],
                        help="Type of a job: e.g. sast, dast, perfmeter, perfgun, perf-ui")
    parser.add_argument('-n', '--job_name', type=str, default="",
                        help="Name of a job (e.g. unique job ID, like %JOBNAME%_%JOBID%)")
    parser.add_argument('-q', '--concurrency', action="append", type=int, default=[],
                        help="Number of parallel workers to run the job")
    parser.add_argument('-r', '--channel', action="append", default=[], type=int,
                        help="Number of parallel workers to run the job")
    parser.add_argument('-a', '--artifact', default="", type=str)
    parser.add_argument('-b', '--bucket', default="", type=str)
    parser.add_argument('-sr', '--save_reports', default=False, type=str2bool)
    parser.add_argument('-j', '--junit', default=False, type=str2bool)
    parser.add_argument('-qg', '--quality_gate', default=False, type=str2bool)
    parser.add_argument('-jr', '--jira', default=False, type=str2bool)
    parser.add_argument('-eml', '--email', default=False, type=str2bool)
    parser.add_argument('-el', '--email_recipients', default="", type=str)
    parser.add_argument('-rp', '--report_portal', default=False, type=str2bool)
    parser.add_argument('-ado', '--azure_devops', default=False, type=str2bool)
    parser.add_argument('-p', '--report_path', default="/tmp/reports", type=str)
    parser.add_argument('-d', '--deviation', default=0, type=float)
    parser.add_argument('-md', '--max_deviation', default=0, type=float)
    parser.add_argument('-tid', '--test_id', default="", type=str)
    args, _ = parser.parse_known_args()
    if args.test_id and GALLOPER_URL:
        args = append_test_config(args)
    return args


def append_test_config(args):
    """Fetch the stored test configuration from Galloper and merge it into args.

    Per test: posts CLI-derived params to the type-specific endpoint, then
    collects execution params/concurrency/container/job_type; also back-fills
    unset CLI flags and config-supplied env vars.
    """
    headers = {'content-type': 'application/json'}
    if TOKEN:
        headers['Authorization'] = f'bearer {TOKEN}'
    url = f"{GALLOPER_URL}/api/v1/tests/{PROJECT_ID}/{args.test_id}"
    # get job_type
    test_config = requests.get(url, headers=headers).json()
    job_type = args.job_type[0] if args.job_type else test_config["job_type"]
    lg_type = JOB_TYPE_MAPPING.get(job_type, "other")
    params = {}
    execution_params = []
    concurrency = []
    container = []
    job_type = []
    tests_count = len(args.execution_params) if args.execution_params else 1
    # prepare params
    for i in range(tests_count):
        if lg_type == 'jmeter':
            url = f"{GALLOPER_URL}/api/v1/tests/{PROJECT_ID}/backend/{args.test_id}"
            # Split the jmeter cmd on -J flags into key=value overrides.
            if args.execution_params and "cmd" in args.execution_params[i].keys():
                exec_params = args.execution_params[i]['cmd'].split("-J")
                for each in exec_params:
                    if "=" in each:
                        _ = each.split("=")
                        params[_[0]] = str(_[1]).strip()
        elif lg_type == 'gatling':
            url = f"{GALLOPER_URL}/api/v1/tests/{PROJECT_ID}/backend/{args.test_id}"
            # Split gatling params on -D flags into key=value overrides.
            if args.execution_params and "GATLING_TEST_PARAMS" in args.execution_params[i].keys():
                exec_params = args.execution_params[i]['GATLING_TEST_PARAMS'].split("-D")
                for each in exec_params:
                    if "=" in each:
                        _ = each.split("=")
                        params[_[0]] = str(_[1]).strip()
        elif lg_type == 'observer':
            url = f"{GALLOPER_URL}/api/v1/tests/{PROJECT_ID}/frontend/{args.test_id}"
        elif lg_type == 'dast':
            url = f"{GALLOPER_URL}/api/v1/tests/{PROJECT_ID}/dast/{args.test_id}"
        elif lg_type == 'sast':
            url = f"{GALLOPER_URL}/api/v1/tests/{PROJECT_ID}/sast/{args.test_id}"
        else:
            print(f"No data found for test_id={args.test_id}")
            exit(1)
        data = {
            "parallel": args.concurrency[i] if args.concurrency else None,
            "params": dumps(params),
            "emails": args.email_recipients if args.email_recipients else "",
            "type": "config"
        }
        # merge params with test config
        test_config = requests.post(url, json=data, headers=headers).json()
        # set args end env vars
        execution_params.append(loads(test_config["execution_params"]))
        concurrency.append(test_config["concurrency"])
        container.append(test_config["container"])
        job_type.append(test_config["job_type"])
    # Back-fill CLI args left empty from the merged config.
    for each in ["artifact", "bucket", "job_name", "email_recipients"]:
        if not getattr(args, each) and each in test_config.keys():
            setattr(args, each, test_config[each])
    for each in ["container", "job_type"]:
        if not getattr(args, each) and each in test_config.keys():
            setattr(args, each, [test_config[each]])
    for each in ["junit", "quality_gate", "save_reports", "jira", "report_portal", "email", "azure_devops"]:
        if not getattr(args, each) and each in test_config.keys():
            setattr(args, each, str2bool(test_config[each]))
    # Config env vars override module globals unless already set in the OS env.
    env_vars = test_config["cc_env_vars"]
    for key, value in env_vars.items():
        if not environ.get(key, None):
            globals()[ENV_VARS_MAPPING.get(key)] = value
    setattr(args, "execution_params", execution_params)
    setattr(args, "concurrency", concurrency)
    setattr(args, "container", container)
    setattr(args, "job_type", job_type)
    if "git" in test_config.keys():
        # Tests live in git: clone, zip and upload as the artifact.
        from control_tower.git_clone import clone_repo, post_artifact
        git_setting = test_config["git"]
        clone_repo(git_setting)
        post_artifact(GALLOPER_URL, TOKEN, PROJECT_ID)
        setattr(args, "artifact", "tests_from_git_repo.zip")
        setattr(args, "bucket", "tests")
    return args


def parse_id():
    """Parse only the group id (plus a few job args); reassemble a JSON groupid
    that the shell may have split across argv tokens."""
    parser = argparse.ArgumentParser(description='Carrier Command Center')
    parser.add_argument('-g', '--groupid', type=str, default="", help="ID of the group for a task")
    parser.add_argument('-c', '--container', type=str, help="Name of container to run the job "
                                                            "e.g. getcarrier/dusty:latest")
    parser.add_argument('-t', '--job_type', type=str, help="Type of a job: e.g. sast, dast, perf-jmeter, perf-ui")
    parser.add_argument('-n', '--job_name', type=str, help="Name of a job (e.g. unique job ID, like %JOBNAME%_%JOBID%)")
    args, _ = parser.parse_known_args()
    if args.groupid:
        for unparsed in _:
            args.groupid = args.groupid + unparsed
        if 'group_id' in args.groupid:
            args.groupid = loads(args.groupid)
    return args


def connect_to_celery(concurrency, redis_db=None, retry=5):
    """Create a Celery app on the given Redis DB and verify worker capacity.

    Polls worker stats up to `retry` times (60 s apart); exits when
    `concurrency` exceeds the total available workers.
    """
    if not (redis_db and isinstance(redis_db, int)):
        redis_db = REDIS_DB
    app = Celery('CarrierExecutor',
                 broker=f'redis://{REDIS_USER}:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/{redis_db}',
                 backend=f'redis://{REDIS_USER}:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/{redis_db}',
                 include=['celery'])
    app.conf.update(broker_transport_options={'max_retries': 3})
    try:
        if not app.control.inspect().stats() and retry != 0:
            print("retry")
            retry -= 1
            sleep(60)
            return connect_to_celery(concurrency, redis_db=redis_db, retry=retry)
    except:
        # NOTE(review): bare except assumes any failure is an auth problem — confirm.
        print("Invalid REDIS password")
        exit(1)
    if concurrency:
        workers = sum(value['pool']['max-concurrency'] for key, value in app.control.inspect().stats().items())
        active = sum(len(value) for key, value in app.control.inspect().active().items())
        available = workers - active
        print(f"Total Workers: {workers}")
        print(f"Available Workers: {available}")
        if workers < concurrency:
            print(f"We are unable to process your request due to limited resources. We have {workers} available")
            exit(1)
    return app


def start_job(args=None):
    """Build per-container task signatures and dispatch them as Celery chords.

    Returns (groups, test_details): chord results per Redis channel and the
    report record from test_start_notify.
    """
    if not args:
        args = arg_parse()
    # Enforce package limit on parallel load generators.
    if GALLOPER_URL and PROJECT_ID and TOKEN:
        package = get_project_package()
        allowable_load_generators = PROJECT_PACKAGE_MAPPER.get(package)["load_generators"]
        for each in args.concurrency:
            if allowable_load_generators != -1 and allowable_load_generators < each:
                print(f"Only {allowable_load_generators} parallel load generators allowable for {package} package.")
                exit(0)
    # Aggregate requested concurrency per Redis channel.
    concurrency_cluster = {}
    channels = args.channel
    if not channels:
        for _ in args.container:
            channels.append(REDIS_DB)
    for index in range(len(channels)):
        if str(channels[index]) not in concurrency_cluster:
            concurrency_cluster[str(channels[index])] = 0
        concurrency_cluster[str(channels[index])] += args.concurrency[index]
    celery_connection_cluster = {}
    results_bucket = str(args.job_name).replace("_", "").replace(" ", "").lower()
    integration = []
    for each in ["jira", "report_portal", "email", "azure_devops"]:
        if getattr(args, each):
            integration.append(each)
    post_processor_args = {
        "galloper_url": GALLOPER_URL,
        "project_id": PROJECT_ID,
        "galloper_web_hook": GALLOPER_WEB_HOOK,
        "bucket": results_bucket,
        "prefix": DISTRIBUTED_MODE_PREFIX,
        "junit": args.junit,
        "token": TOKEN,
        "integration": integration,
        "email_recipients": args.email_recipients
    }
    # One Celery connection and one post-processor signature per channel.
    for channel in channels:
        if str(channel) not in celery_connection_cluster:
            celery_connection_cluster[str(channel)] = {}
            celery_connection_cluster[str(channel)]['app'] = connect_to_celery(concurrency_cluster[str(channel)], channel)
            celery_connection_cluster[str(channel)]['post_processor'] = \
                celery_connection_cluster[str(channel)]['app'].signature('tasks.post_process',
                                                                         kwargs=post_processor_args)
    job_type = "".join(args.container)
    job_type += "".join(args.job_type)
    for i in range(len(args.container)):
        if 'tasks' not in celery_connection_cluster[str(channels[i])]:
            celery_connection_cluster[str(channels[i])]['tasks'] = []
        exec_params = deepcopy(args.execution_params[i])
        if mounts:
            exec_params['mounts'] = mounts
        if args.job_type[i] in ['perfgun', 'perfmeter']:
            # Backend perf jobs: attach config yaml, Loki, artifacts and auth.
            if path.exists('/tmp/config.yaml'):
                with open('/tmp/config.yaml', 'r') as f:
                    config_yaml = f.read()
                exec_params['config_yaml'] = dumps(config_yaml)
            else:
                exec_params['config_yaml'] = {}
            if LOKI_HOST:
                exec_params['loki_host'] = LOKI_HOST
                exec_params['loki_port'] = LOKI_PORT
            if ADDITIONAL_FILES:
                exec_params['additional_files'] = ADDITIONAL_FILES
            if JVM_ARGS:
                exec_params['JVM_ARGS'] = JVM_ARGS
            if 'additional_files' in exec_params:
                exec_params['additional_files'] = dumps(exec_params['additional_files']).replace("'", "\"")
            exec_params['build_id'] = BUILD_ID
            exec_params['DISTRIBUTED_MODE_PREFIX'] = DISTRIBUTED_MODE_PREFIX
            exec_params['galloper_url'] = GALLOPER_URL
            exec_params['bucket'] = BUCKET if not args.bucket else args.bucket
            exec_params['artifact'] = TEST if not args.artifact else args.artifact
            exec_params['results_bucket'] = results_bucket
            exec_params['save_reports'] = args.save_reports
            if PROJECT_ID:
                exec_params['project_id'] = PROJECT_ID
            if TOKEN:
                exec_params['token'] = TOKEN
        elif args.job_type[i] == "observer":
            execution_params = args.execution_params[i]
            exec_params["GALLOPER_URL"] = GALLOPER_URL
            exec_params["REPORTS_BUCKET"] = BUCKET
            exec_params["RESULTS_BUCKET"] = results_bucket
            exec_params["RESULTS_REPORT_NAME"] = DISTRIBUTED_MODE_PREFIX
            exec_params["GALLOPER_PROJECT_ID"] = PROJECT_ID
            exec_params["JOB_NAME"] = args.job_name
            if TOKEN:
                exec_params['token'] = TOKEN
            if mounts:
                exec_params['mounts'] = mounts if not execution_params["mounts"] else execution_params[
                    "mounts"]
        elif args.job_type[i] == "sast":
            if "code_path" in exec_params:
                # Zip the mounted /code tree in-memory and upload it for SAST.
                print("Uploading code artifact to Galloper ...")
                with tempfile.TemporaryFile() as src_file:
                    with zipfile.ZipFile(src_file, "w", zipfile.ZIP_DEFLATED) as zip_file:
                        src_dir = os.path.abspath("/code")
                        for dirpath, _, filenames in os.walk(src_dir):
                            if dirpath == src_dir:
                                rel_dir = ""
                            else:
                                rel_dir = os.path.relpath(dirpath, src_dir)
                                zip_file.write(dirpath, arcname=rel_dir)
                            for filename in filenames:
                                zip_file.write(
                                    os.path.join(dirpath, filename),
                                    arcname=os.path.join(rel_dir, filename)
                                )
                    src_file.seek(0)
                    headers = {
                        "Authorization": f"Bearer {TOKEN}"
                    }
                    url = f"{GALLOPER_URL}/api/v1/artifacts/{PROJECT_ID}/sast/{args.test_id}.zip"
                    requests.post(
                        url, headers=headers,
                        files={
                            "file": (f"{args.test_id}.zip", src_file)
                        }
                    )
        # One task signature per requested worker for this container.
        for _ in range(int(args.concurrency[i])):
            task_kwargs = {'job_type': str(args.job_type[i]), 'container': args.container[i],
                           'execution_params': exec_params, 'redis_connection': '', 'job_name': args.job_name}
            celery_connection_cluster[str(channels[i])]['tasks'].append(
                celery_connection_cluster[str(channels[i])]['app'].signature('tasks.execute', kwargs=task_kwargs))
    test_details = test_start_notify(args)
    groups = []
    for each in celery_connection_cluster:
        task_group = chord(
            celery_connection_cluster[each]['tasks'], app=celery_connection_cluster[each]['app'])(
            celery_connection_cluster[each]['post_processor'])
        groups.append(task_group)
    return groups, test_details


def get_project_package():
    """Return the Galloper project's package name, or "custom" on any failure."""
    try:
        url = f"{GALLOPER_URL}/api/v1/project/{PROJECT_ID}"
        headers = {'content-type': 'application/json', 'Authorization': f'bearer {TOKEN}'}
        package = requests.get(url, headers=headers).json()["package"]
    except:
        package = "custom"
    return package


def test_start_notify(args):
    """Register the starting test with Galloper and return its report record.

    Parses name/type/env/duration/vusers from jmeter -J or gatling -D params;
    returns {} when GALLOPER_URL is unset or the lg_type is unsupported.
    """
    if GALLOPER_URL:
        users_count = 0
        duration = 0
        vusers_var_names = ["vusers", "users", "users_count", "ramp_users", "user_count"]
        lg_type = JOB_TYPE_MAPPING.get(args.job_type[0], "other")
        tests_count = len(args.execution_params) if args.execution_params else 1
        if lg_type == 'jmeter':
            for i in range(tests_count):
                # Trailing space lets the "(.+?) " patterns match the last flag too.
                exec_params = args.execution_params[i]['cmd'] + " "
                test_type = re.findall('-Jtest.type=(.+?) ', exec_params)
                test_type = test_type[0] if len(test_type) else 'demo'
                environment = re.findall("-Jenv.type=(.+?) ", exec_params)
                environment = environment[0] if len(environment) else 'demo'
                test_name = re.findall("-Jtest_name=(.+?) ", exec_params)
                test_name = test_name[0] if len(test_name) else 'test'
                duration = re.findall("-JDURATION=(.+?) ", exec_params)
                duration = float(duration[0]) if len(duration) else 0
                for each in vusers_var_names:
                    if f'-j{each}' in exec_params.lower():
                        pattern = f'-j{each}=(.+?) '
                        vusers = re.findall(pattern, exec_params.lower())
                        users_count += int(vusers[0]) * args.concurrency[i]
                        break
        elif lg_type == 'gatling':
            for i in range(tests_count):
                exec_params = args.execution_params[i]
                test_type = exec_params['test_type'] if exec_params.get('test_type') else 'demo'
                test_name = exec_params['test'].split(".")[1].lower() if exec_params.get('test') else 'test'
                environment = exec_params['env'] if exec_params.get('env') else 'demo'
                if exec_params.get('GATLING_TEST_PARAMS'):
                    if '-dduration' in exec_params['GATLING_TEST_PARAMS'].lower():
                        duration = re.findall("-dduration=(.+?) ", exec_params['GATLING_TEST_PARAMS'].lower())[0]
                    for each in vusers_var_names:
                        if f'-d{each}' in exec_params['GATLING_TEST_PARAMS'].lower():
                            pattern = f'-d{each}=(.+?) '
                            vusers = re.findall(pattern, exec_params['GATLING_TEST_PARAMS'].lower())
                            users_count += int(vusers[0]) * args.concurrency[i]
                            break
        else:
            return {}
        start_time = datetime.utcnow().isoformat("T") + "Z"
        data = {'build_id': BUILD_ID, 'test_name': test_name, 'lg_type': lg_type, 'type': test_type,
                'duration': duration, 'vusers': users_count, 'environment': environment,
                'start_time': start_time, 'missed': 0, 'status': 'In progress'}
        if release_id:
            data['release_id'] = release_id
        headers = {'content-type': 'application/json'}
        if TOKEN:
            headers['Authorization'] = f'bearer {TOKEN}'
        if PROJECT_ID:
            url = f'{GALLOPER_URL}/api/v1/reports/{PROJECT_ID}'
        else:
            url = f'{GALLOPER_URL}/api/report'
        res = requests.post(url, json=data, headers=headers).json()
        if res.get('Forbidden', None):
            print(f"Forbidden: {res.get('Forbidden')}")
            exit(0)
        return res
    return {}


def start_job_exec(args=None):
    """Entry point: dispatch the job and exit immediately (fire-and-forget)."""
    start_job(args)
    exit(0)


def check_ready(result):
    """Return True when `result` is falsy or its task group has finished."""
    if result and not result.ready():
        return False
    return True


def check_test_is_saturating(test_id=None, deviation=0.02, max_deviation=0.05):
    """Query Galloper's saturation API; return a neutral "in progress" dict
    when the prerequisites (test id, project, sampler, request) are missing."""
    if test_id and PROJECT_ID and SAMPLER and REQUEST:
        url = f'{GALLOPER_URL}/api/v1/saturation'
        headers = {'Authorization': f'bearer {TOKEN}'} if TOKEN else {}
        headers["Content-type"] = "application/json"
        params = {
            "test_id": test_id,
            "project_id": PROJECT_ID,
            "sampler": SAMPLER,
            "request": REQUEST,
            "wait_till": CALCULATION_DELAY,
            "max_errors": MAX_ERRORS,
            "deviation": deviation,
            "max_deviation": max_deviation,
            "u_aggr": U_AGGR
        }
        return requests.get(url, params=params, headers=headers).json()
    return {"message": "Test is in progress", "code": 0}


# TODO check for lost connection and retry
def track_job(group, test_id=None, deviation=0.02, max_deviation=0.05):
    """Poll a dispatched chord every 60 s until completion.

    Kills the job on saturation, cancellation, or exceeding the package's max
    duration; returns 1 when killed for saturation/cancel, else 0.
    """
    result = 0
    test_start = time()
    max_duration = -1
    if GALLOPER_URL and PROJECT_ID and TOKEN:
        package = get_project_package()
        max_duration = PROJECT_PACKAGE_MAPPER.get(package)["duration"]
    while not group.ready():
        sleep(60)
        if CHECK_SATURATION:
            test_status = check_test_is_saturating(test_id, deviation, max_deviation)
            print(test_status)
            if test_status.get("code", 0) == 1:
                kill_job(group)
                result = 1
        else:
            print("Still processing ...")
        if test_was_canceled(test_id) and result != 1:
            print("Test was canceled")
            kill_job(group)
            result = 1
        # NOTE(review): exceeding max duration kills the job but leaves result
        # at 0 — confirm this is intended.
        if max_duration != -1 and max_duration <= int((time() - test_start)) and result != 1:
            print(f"Exceeded max test duration - {max_duration} sec")
            kill_job(group)
    if group.successful():
        print("We are done successfully")
    else:
        print("We are failed badly")
    group.forget()
    return result


def test_was_canceled(test_id):
    """Return True when the Galloper report status is "Canceled"; best-effort
    (any API/network failure counts as not canceled)."""
    try:
        if test_id and PROJECT_ID and GALLOPER_URL:
            url = f'{GALLOPER_URL}/api/v1/reports/{PROJECT_ID}/{test_id}/status'
            headers = {'Authorization': f'bearer {TOKEN}'} if TOKEN else {}
            headers["Content-type"] = "application/json"
            status = requests.get(url, headers=headers).json()['message']
            return True if status == "Canceled" else False
        return False
    except:
        return False


def _start_and_track(args=None):
    """Start the job, track every chord to completion, then post-process reports."""
    if not args:
        args = arg_parse()
    deviation = DEVIATION if args.deviation == 0 else args.deviation
    max_deviation = MAX_DEVIATION if args.max_deviation == 0 else args.max_deviation
    groups, test_details = start_job(args)
    print("Job started, waiting for containers to settle ... ")
    for group in groups:
        track_job(group, test_details.get("id", None), deviation, max_deviation)
    if args.junit:
        print("Processing junit report ...")
        process_junit_report(args)
    if args.job_type[0] in ["dast", "sast"] and args.quality_gate:
        print("Processing security quality gate ...")
        process_security_quality_gate(args)


def start_and_track(args=None):
    """CLI entry point wrapper around _start_and_track; always exits 0."""
    _start_and_track(args)
    exit(0)


def process_security_quality_gate(args):
    """Download security jUnit/quality-gate reports; exit(1) when the gate fails."""
    # Save jUnit report as file to local filesystem
    junit_report_data = download_junit_report(
        args.job_type[0], f"{args.test_id}_junit_report.xml", retry=12
    )
    if junit_report_data:
        with open(os.path.join(args.report_path, f"junit_report_{args.test_id}.xml"), "w") as rept:
            rept.write(junit_report_data.text)
    # Quality Gate
    quality_gate_data = download_junit_report(
        args.job_type[0], f"{args.test_id}_quality_gate_report.json", retry=12
    )
    if not quality_gate_data:
        print("No security quality gate data found")
        return
    quality_gate = loads(quality_gate_data.text)
    if quality_gate["quality_gate_stats"]:
        for line in quality_gate["quality_gate_stats"]:
            print(line)
    if quality_gate["fail_quality_gate"]:
        exit(1)


def process_junit_report(args):
    """Download the perf jUnit report, print a summary, and (with
    --quality_gate) exit 1 when the failure rate exceeds 20%."""
    file_name = "junit_report_{}.xml".format(DISTRIBUTED_MODE_PREFIX)
    results_bucket = str(args.job_name).replace("_", "").lower()
    junit_report = download_junit_report(results_bucket, file_name, retry=12)
    if junit_report:
        with open("{}/{}".format(args.report_path, file_name), "w") as f:
            f.write(junit_report.text)
        failed = int(re.findall("testsuites .+? failures=\"(.+?)\"", junit_report.text)[0])
        total = int(re.findall("testsuites .+? tests=\"(.+?)\"", junit_report.text)[0])
        errors = int(re.findall("testsuites .+? errors=\"(.+?)\"", junit_report.text)[0])
        skipped = int(re.findall("testsuite .+? 
skipped=\"(.+?)\"", junit_report.text)[0]) print("**********************************************") print("* Performance testing jUnit report | Carrier *") print("**********************************************") print(f"Tests run: {total}, Failures: {failed}, Errors: {errors}, Skipped: {skipped}") if args.quality_gate: rate = round(float(failed / total) * 100, 2) if total != 0 else 0 if rate > 20: print("Missed threshold rate is {}".format(rate), file=sys.stderr) exit(1) def download_junit_report(results_bucket, file_name, retry): if PROJECT_ID: url = f'{GALLOPER_URL}/api/v1/artifacts/{PROJECT_ID}/{results_bucket}/{file_name}' else: url = f'{GALLOPER_URL}/artifacts/{results_bucket}/{file_name}' headers = {'Authorization': f'bearer {TOKEN}'} if TOKEN else {} junit_report = requests.get(url, headers=headers, allow_redirects=True) if junit_report.status_code != 200 or 'botocore.errorfactory.NoSuchKey' in junit_report.text: print("Waiting for report to be accessible ...") retry -= 1 if retry == 0: return None sleep(10) return download_junit_report(results_bucket, file_name, retry) return junit_report def kill_job(group): abbortables = [] _app = group.app if not group.ready(): for task in group.parent.children: abortable = AbortableAsyncResult(id=task.task_id, app=_app) abortable.abort() abbortables.append(abortable) for _ in range(KILL_MAX_WAIT_TIME): if all(task.result for task in abbortables): break sleep(60) print("Aborting distributed tasks ... 
") return 0 # if __name__ == "__main__": # from control_tower.config_mock import BulkConfig # args = BulkConfig( # bulk_container=["getcarrier/perfmeter:latest"], # bulk_params=[{"cmd": "-n -t /mnt/jmeter/FloodIO.jmx -Jtest.type=debug -Jenv.type=debug " # "-Jinflux.host= -JVUSERS=100 -JDURATION=1200 " # "-JRAMP_UP=60 -Jtest_name=Flood"}], # job_type=["perfmeter"], # job_name='DemoTest', # bulk_concurrency=[2] # ) # groups, test_details, post_processor_args = start_job(args) # for group in groups: # track_job(group, test_details["id"])
"""Support for Huawei LTE routers.""" from __future__ import annotations from collections import defaultdict from collections.abc import Callable from contextlib import suppress from dataclasses import dataclass, field from datetime import timedelta import logging import time from typing import Any, NamedTuple, cast from huawei_lte_api.AuthorizedConnection import AuthorizedConnection from huawei_lte_api.Client import Client from huawei_lte_api.Connection import Connection from huawei_lte_api.exceptions import ( ResponseErrorException, ResponseErrorLoginRequiredException, ResponseErrorNotSupportedException, ) from requests.exceptions import Timeout from url_normalize import url_normalize import voluptuous as vol from homeassistant.components.notify import DOMAIN as NOTIFY_DOMAIN from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry from homeassistant.const import ( ATTR_HW_VERSION, ATTR_MODEL, ATTR_SW_VERSION, CONF_MAC, CONF_NAME, CONF_PASSWORD, CONF_RECIPIENT, CONF_URL, CONF_USERNAME, EVENT_HOMEASSISTANT_STOP, Platform, ) from homeassistant.core import HomeAssistant, ServiceCall from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers import ( config_validation as cv, device_registry as dr, discovery, entity_registry, ) from homeassistant.helpers.dispatcher import async_dispatcher_connect, dispatcher_send from homeassistant.helpers.entity import DeviceInfo, Entity from homeassistant.helpers.event import async_track_time_interval from homeassistant.helpers.typing import ConfigType from .const import ( ADMIN_SERVICES, ALL_KEYS, ATTR_UNIQUE_ID, CONF_UNAUTHENTICATED_MODE, CONNECTION_TIMEOUT, DEFAULT_DEVICE_NAME, DEFAULT_NOTIFY_SERVICE_NAME, DOMAIN, KEY_DEVICE_BASIC_INFORMATION, KEY_DEVICE_INFORMATION, KEY_DEVICE_SIGNAL, KEY_DIALUP_MOBILE_DATASWITCH, KEY_LAN_HOST_INFO, KEY_MONITORING_CHECK_NOTIFICATIONS, KEY_MONITORING_MONTH_STATISTICS, KEY_MONITORING_STATUS, KEY_MONITORING_TRAFFIC_STATISTICS, KEY_NET_CURRENT_PLMN, 
KEY_NET_NET_MODE, KEY_SMS_SMS_COUNT, KEY_WLAN_HOST_LIST, KEY_WLAN_WIFI_FEATURE_SWITCH, NOTIFY_SUPPRESS_TIMEOUT, SERVICE_CLEAR_TRAFFIC_STATISTICS, SERVICE_REBOOT, SERVICE_RESUME_INTEGRATION, SERVICE_SUSPEND_INTEGRATION, UPDATE_SIGNAL, ) from .utils import get_device_macs _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = timedelta(seconds=10) NOTIFY_SCHEMA = vol.Any( None, vol.Schema( { vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_RECIPIENT): vol.Any( None, vol.All(cv.ensure_list, [cv.string]) ), } ), ) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.All( cv.ensure_list, [ vol.Schema( { vol.Required(CONF_URL): cv.url, vol.Optional(CONF_USERNAME): cv.string, vol.Optional(CONF_PASSWORD): cv.string, vol.Optional(NOTIFY_DOMAIN): NOTIFY_SCHEMA, } ) ], ) }, extra=vol.ALLOW_EXTRA, ) SERVICE_SCHEMA = vol.Schema({vol.Optional(CONF_URL): cv.url}) PLATFORMS = [ Platform.BINARY_SENSOR, Platform.DEVICE_TRACKER, Platform.SENSOR, Platform.SWITCH, ] @dataclass class Router: """Class for router state.""" hass: HomeAssistant config_entry: ConfigEntry connection: Connection url: str data: dict[str, Any] = field(default_factory=dict, init=False) subscriptions: dict[str, set[str]] = field( default_factory=lambda: defaultdict( set, ((x, {"initial_scan"}) for x in ALL_KEYS) ), init=False, ) inflight_gets: set[str] = field(default_factory=set, init=False) client: Client = field(init=False) suspended: bool = field(default=False, init=False) notify_last_attempt: float = field(default=-1, init=False) def __post_init__(self) -> None: """Set up internal state on init.""" self.client = Client(self.connection) @property def device_name(self) -> str: """Get router device name.""" for key, item in ( (KEY_DEVICE_BASIC_INFORMATION, "devicename"), (KEY_DEVICE_INFORMATION, "DeviceName"), ): with suppress(KeyError, TypeError): return cast(str, self.data[key][item]) return DEFAULT_DEVICE_NAME @property def device_identifiers(self) -> set[tuple[str, str]]: """Get router identifiers for device 
registry.""" assert self.config_entry.unique_id is not None return {(DOMAIN, self.config_entry.unique_id)} @property def device_connections(self) -> set[tuple[str, str]]: """Get router connections for device registry.""" return { (dr.CONNECTION_NETWORK_MAC, x) for x in self.config_entry.data[CONF_MAC] } def _get_data(self, key: str, func: Callable[[], Any]) -> None: if not self.subscriptions.get(key): return if key in self.inflight_gets: _LOGGER.debug("Skipping already inflight get for %s", key) return self.inflight_gets.add(key) _LOGGER.debug("Getting %s for subscribers %s", key, self.subscriptions[key]) try: self.data[key] = func() except ResponseErrorLoginRequiredException: if isinstance(self.connection, AuthorizedConnection): _LOGGER.debug("Trying to authorize again") if self.connection.enforce_authorized_connection(): _LOGGER.debug( "success, %s will be updated by a future periodic run", key, ) else: _LOGGER.debug("failed") return _LOGGER.info( "%s requires authorization, excluding from future updates", key ) self.subscriptions.pop(key) except ResponseErrorException as exc: if not isinstance( exc, ResponseErrorNotSupportedException ) and exc.code not in ( # additional codes treated as unusupported -1, 100006, ): raise _LOGGER.info( "%s apparently not supported by device, excluding from future updates", key, ) self.subscriptions.pop(key) except Timeout: grace_left = ( self.notify_last_attempt - time.monotonic() + NOTIFY_SUPPRESS_TIMEOUT ) if grace_left > 0: _LOGGER.debug( "%s timed out, %.1fs notify timeout suppress grace remaining", key, grace_left, exc_info=True, ) else: raise finally: self.inflight_gets.discard(key) _LOGGER.debug("%s=%s", key, self.data.get(key)) def update(self) -> None: """Update router data.""" if self.suspended: _LOGGER.debug("Integration suspended, not updating data") return self._get_data(KEY_DEVICE_INFORMATION, self.client.device.information) if self.data.get(KEY_DEVICE_INFORMATION): # Full information includes everything in basic 
self.subscriptions.pop(KEY_DEVICE_BASIC_INFORMATION, None) self._get_data( KEY_DEVICE_BASIC_INFORMATION, self.client.device.basic_information ) self._get_data(KEY_DEVICE_SIGNAL, self.client.device.signal) self._get_data( KEY_DIALUP_MOBILE_DATASWITCH, self.client.dial_up.mobile_dataswitch ) self._get_data( KEY_MONITORING_MONTH_STATISTICS, self.client.monitoring.month_statistics ) self._get_data( KEY_MONITORING_CHECK_NOTIFICATIONS, self.client.monitoring.check_notifications, ) self._get_data(KEY_MONITORING_STATUS, self.client.monitoring.status) self._get_data( KEY_MONITORING_TRAFFIC_STATISTICS, self.client.monitoring.traffic_statistics ) self._get_data(KEY_NET_CURRENT_PLMN, self.client.net.current_plmn) self._get_data(KEY_NET_NET_MODE, self.client.net.net_mode) self._get_data(KEY_SMS_SMS_COUNT, self.client.sms.sms_count) self._get_data(KEY_LAN_HOST_INFO, self.client.lan.host_info) if self.data.get(KEY_LAN_HOST_INFO): # LAN host info includes everything in WLAN host list self.subscriptions.pop(KEY_WLAN_HOST_LIST, None) self._get_data(KEY_WLAN_HOST_LIST, self.client.wlan.host_list) self._get_data( KEY_WLAN_WIFI_FEATURE_SWITCH, self.client.wlan.wifi_feature_switch ) dispatcher_send(self.hass, UPDATE_SIGNAL, self.config_entry.unique_id) def logout(self) -> None: """Log out router session.""" if not isinstance(self.connection, AuthorizedConnection): return try: self.client.user.logout() except ResponseErrorNotSupportedException: _LOGGER.debug("Logout not supported by device", exc_info=True) except ResponseErrorLoginRequiredException: _LOGGER.debug("Logout not supported when not logged in", exc_info=True) except Exception: # pylint: disable=broad-except _LOGGER.warning("Logout error", exc_info=True) def cleanup(self, *_: Any) -> None: """Clean up resources.""" self.subscriptions.clear() self.logout() class HuaweiLteData(NamedTuple): """Shared state.""" hass_config: ConfigType # Our YAML config, keyed by router URL config: dict[str, dict[str, Any]] routers: dict[str, 
Router] async def async_setup_entry( # noqa: C901 hass: HomeAssistant, entry: ConfigEntry ) -> bool: """Set up Huawei LTE component from config entry.""" url = entry.data[CONF_URL] # Override settings from YAML config, but only if they're changed in it # Old values are stored as *_from_yaml in the config entry if yaml_config := hass.data[DOMAIN].config.get(url): # Config values new_data = {} for key in CONF_USERNAME, CONF_PASSWORD: if key in yaml_config: value = yaml_config[key] if value != entry.data.get(f"{key}_from_yaml"): new_data[f"{key}_from_yaml"] = value new_data[key] = value # Options new_options = {} yaml_recipient = yaml_config.get(NOTIFY_DOMAIN, {}).get(CONF_RECIPIENT) if yaml_recipient is not None and yaml_recipient != entry.options.get( f"{CONF_RECIPIENT}_from_yaml" ): new_options[f"{CONF_RECIPIENT}_from_yaml"] = yaml_recipient new_options[CONF_RECIPIENT] = yaml_recipient yaml_notify_name = yaml_config.get(NOTIFY_DOMAIN, {}).get(CONF_NAME) if yaml_notify_name is not None and yaml_notify_name != entry.options.get( f"{CONF_NAME}_from_yaml" ): new_options[f"{CONF_NAME}_from_yaml"] = yaml_notify_name new_options[CONF_NAME] = yaml_notify_name # Update entry if overrides were found if new_data or new_options: hass.config_entries.async_update_entry( entry, data={**entry.data, **new_data}, options={**entry.options, **new_options}, ) def get_connection() -> Connection: """Set up a connection.""" if entry.options.get(CONF_UNAUTHENTICATED_MODE): _LOGGER.debug("Connecting in unauthenticated mode, reduced feature set") connection = Connection(url, timeout=CONNECTION_TIMEOUT) else: _LOGGER.debug("Connecting in authenticated mode, full feature set") username = entry.data.get(CONF_USERNAME) or "" password = entry.data.get(CONF_PASSWORD) or "" connection = AuthorizedConnection( url, username=username, password=password, timeout=CONNECTION_TIMEOUT ) return connection try: connection = await hass.async_add_executor_job(get_connection) except Timeout as ex: raise 
ConfigEntryNotReady from ex # Set up router router = Router(hass, entry, connection, url) # Do initial data update await hass.async_add_executor_job(router.update) # Check that we found required information router_info = router.data.get(KEY_DEVICE_INFORMATION) if not entry.unique_id: # Transitional from < 2021.8: update None config entry and entity unique ids if router_info and (serial_number := router_info.get("SerialNumber")): hass.config_entries.async_update_entry(entry, unique_id=serial_number) ent_reg = entity_registry.async_get(hass) for entity_entry in entity_registry.async_entries_for_config_entry( ent_reg, entry.entry_id ): if not entity_entry.unique_id.startswith("None-"): continue new_unique_id = ( f"{serial_number}-{entity_entry.unique_id.split("-", 1)[1]}" ) ent_reg.async_update_entity( entity_entry.entity_id, new_unique_id=new_unique_id ) else: await hass.async_add_executor_job(router.cleanup) msg = ( "Could not resolve serial number to use as unique id for router at %s" ", setup failed" ) if not entry.data.get(CONF_PASSWORD): msg += ( ". Try setting up credentials for the router for one startup, " "unauthenticated mode can be enabled after that in integration " "settings" ) _LOGGER.error(msg, url) return False # Store reference to router hass.data[DOMAIN].routers[entry.unique_id] = router # Clear all subscriptions, enabled entities will push back theirs router.subscriptions.clear() # Update device MAC addresses on record. These can change due to toggling between # authenticated and unauthenticated modes, or likely also when enabling/disabling # SSIDs in the router config. 
try: wlan_settings = await hass.async_add_executor_job( router.client.wlan.multi_basic_settings ) except Exception: # pylint: disable=broad-except # Assume not supported, or authentication required but in unauthenticated mode wlan_settings = {} macs = get_device_macs(router_info or {}, wlan_settings) # Be careful not to overwrite a previous, more complete set with a partial one if macs and (not entry.data[CONF_MAC] or (router_info and wlan_settings)): new_data = dict(entry.data) new_data[CONF_MAC] = macs hass.config_entries.async_update_entry(entry, data=new_data) # Set up device registry if router.device_identifiers or router.device_connections: device_info = DeviceInfo( configuration_url=router.url, connections=router.device_connections, identifiers=router.device_identifiers, name=router.device_name, manufacturer="Huawei", ) hw_version = None sw_version = None if router_info: hw_version = router_info.get("HardwareVersion") sw_version = router_info.get("SoftwareVersion") if router_info.get("DeviceName"): device_info[ATTR_MODEL] = router_info["DeviceName"] if not sw_version and router.data.get(KEY_DEVICE_BASIC_INFORMATION): sw_version = router.data[KEY_DEVICE_BASIC_INFORMATION].get( "SoftwareVersion" ) if hw_version: device_info[ATTR_HW_VERSION] = hw_version if sw_version: device_info[ATTR_SW_VERSION] = sw_version device_registry = dr.async_get(hass) device_registry.async_get_or_create( config_entry_id=entry.entry_id, **device_info, ) # Forward config entry setup to platforms hass.config_entries.async_setup_platforms(entry, PLATFORMS) # Notify doesn't support config entry setup yet, load with discovery for now await discovery.async_load_platform( hass, NOTIFY_DOMAIN, DOMAIN, { ATTR_UNIQUE_ID: entry.unique_id, CONF_NAME: entry.options.get(CONF_NAME, DEFAULT_NOTIFY_SERVICE_NAME), CONF_RECIPIENT: entry.options.get(CONF_RECIPIENT), }, hass.data[DOMAIN].hass_config, ) def _update_router(*_: Any) -> None: """ Update router data. 
Separate passthrough function because lambdas don't work with track_time_interval. """ router.update() # Set up periodic update entry.async_on_unload( async_track_time_interval(hass, _update_router, SCAN_INTERVAL) ) # Clean up at end entry.async_on_unload( hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, router.cleanup) ) return True async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool: """Unload config entry.""" # Forward config entry unload to platforms await hass.config_entries.async_unload_platforms(config_entry, PLATFORMS) # Forget about the router and invoke its cleanup router = hass.data[DOMAIN].routers.pop(config_entry.unique_id) await hass.async_add_executor_job(router.cleanup) return True async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool: """Set up Huawei LTE component.""" # dicttoxml (used by huawei-lte-api) has uselessly verbose INFO level. # https://github.com/quandyfactory/dicttoxml/issues/60 logging.getLogger("dicttoxml").setLevel(logging.WARNING) # Arrange our YAML config to dict with normalized URLs as keys domain_config: dict[str, dict[str, Any]] = {} if DOMAIN not in hass.data: hass.data[DOMAIN] = HuaweiLteData( hass_config=config, config=domain_config, routers={} ) for router_config in config.get(DOMAIN, []): domain_config[url_normalize(router_config.pop(CONF_URL))] = router_config def service_handler(service: ServiceCall) -> None: """ Apply a service. We key this using the router URL instead of its unique id / serial number, because the latter is not available anywhere in the UI. 
""" routers = hass.data[DOMAIN].routers if url := service.data.get(CONF_URL): router = next( (router for router in routers.values() if router.url == url), None ) elif not routers: _LOGGER.error("%s: no routers configured", service.service) return elif len(routers) == 1: router = next(iter(routers.values())) else: _LOGGER.error( "%s: more than one router configured, must specify one of URLs %s", service.service, sorted(router.url for router in routers.values()), ) return if not router: _LOGGER.error("%s: router %s unavailable", service.service, url) return if service.service == SERVICE_CLEAR_TRAFFIC_STATISTICS: if router.suspended: _LOGGER.debug("%s: ignored, integration suspended", service.service) return result = router.client.monitoring.set_clear_traffic() _LOGGER.debug("%s: %s", service.service, result) elif service.service == SERVICE_REBOOT: if router.suspended: _LOGGER.debug("%s: ignored, integration suspended", service.service) return result = router.client.device.reboot() _LOGGER.debug("%s: %s", service.service, result) elif service.service == SERVICE_RESUME_INTEGRATION: # Login will be handled automatically on demand router.suspended = False _LOGGER.debug("%s: %s", service.service, "done") elif service.service == SERVICE_SUSPEND_INTEGRATION: router.logout() router.suspended = True _LOGGER.debug("%s: %s", service.service, "done") else: _LOGGER.error("%s: unsupported service", service.service) for service in ADMIN_SERVICES: hass.helpers.service.async_register_admin_service( DOMAIN, service, service_handler, schema=SERVICE_SCHEMA, ) for url, router_config in domain_config.items(): hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_IMPORT}, data={ CONF_URL: url, CONF_USERNAME: router_config.get(CONF_USERNAME), CONF_PASSWORD: router_config.get(CONF_PASSWORD), }, ) ) return True async def async_migrate_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool: """Migrate config entry to new version.""" if 
config_entry.version == 1: options = dict(config_entry.options) recipient = options.get(CONF_RECIPIENT) if isinstance(recipient, str): options[CONF_RECIPIENT] = [x.strip() for x in recipient.split(",")] config_entry.version = 2 hass.config_entries.async_update_entry(config_entry, options=options) _LOGGER.info("Migrated config entry to version %d", config_entry.version) if config_entry.version == 2: config_entry.version = 3 data = dict(config_entry.data) data[CONF_MAC] = [] hass.config_entries.async_update_entry(config_entry, data=data) _LOGGER.info("Migrated config entry to version %d", config_entry.version) return True @dataclass class HuaweiLteBaseEntity(Entity): """Huawei LTE entity base class.""" router: Router _available: bool = field(default=True, init=False) _unsub_handlers: list[Callable] = field(default_factory=list, init=False) @property def _entity_name(self) -> str: raise NotImplementedError @property def _device_unique_id(self) -> str: """Return unique ID for entity within a router.""" raise NotImplementedError @property def unique_id(self) -> str: """Return unique ID for entity.""" return f"{self.router.config_entry.unique_id}-{self._device_unique_id}" @property def name(self) -> str: """Return entity name.""" return f"Huawei {self.router.device_name} {self._entity_name}" @property def available(self) -> bool: """Return whether the entity is available.""" return self._available @property def should_poll(self) -> bool: """Huawei LTE entities report their state without polling.""" return False async def async_update(self) -> None: """Update state.""" raise NotImplementedError async def async_added_to_hass(self) -> None: """Connect to update signals.""" self._unsub_handlers.append( async_dispatcher_connect(self.hass, UPDATE_SIGNAL, self._async_maybe_update) ) async def _async_maybe_update(self, config_entry_unique_id: str) -> None: """Update state if the update signal comes from our router.""" if config_entry_unique_id == 
self.router.config_entry.unique_id: self.async_schedule_update_ha_state(True) async def async_will_remove_from_hass(self) -> None: """Invoke unsubscription handlers.""" for unsub in self._unsub_handlers: unsub() self._unsub_handlers.clear() class HuaweiLteBaseEntityWithDevice(HuaweiLteBaseEntity): """Base entity with device info.""" @property def device_info(self) -> DeviceInfo: """Get info for matching with parent router.""" return DeviceInfo( connections=self.router.device_connections, identifiers=self.router.device_identifiers, )
"""Support for Huawei LTE routers.""" from __future__ import annotations from collections import defaultdict from collections.abc import Callable from contextlib import suppress from dataclasses import dataclass, field from datetime import timedelta import logging import time from typing import Any, NamedTuple, cast from huawei_lte_api.AuthorizedConnection import AuthorizedConnection from huawei_lte_api.Client import Client from huawei_lte_api.Connection import Connection from huawei_lte_api.exceptions import ( ResponseErrorException, ResponseErrorLoginRequiredException, ResponseErrorNotSupportedException, ) from requests.exceptions import Timeout from url_normalize import url_normalize import voluptuous as vol from homeassistant.components.notify import DOMAIN as NOTIFY_DOMAIN from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry from homeassistant.const import ( ATTR_HW_VERSION, ATTR_MODEL, ATTR_SW_VERSION, CONF_MAC, CONF_NAME, CONF_PASSWORD, CONF_RECIPIENT, CONF_URL, CONF_USERNAME, EVENT_HOMEASSISTANT_STOP, Platform, ) from homeassistant.core import HomeAssistant, ServiceCall from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers import ( config_validation as cv, device_registry as dr, discovery, entity_registry, ) from homeassistant.helpers.dispatcher import async_dispatcher_connect, dispatcher_send from homeassistant.helpers.entity import DeviceInfo, Entity from homeassistant.helpers.event import async_track_time_interval from homeassistant.helpers.typing import ConfigType from .const import ( ADMIN_SERVICES, ALL_KEYS, ATTR_UNIQUE_ID, CONF_UNAUTHENTICATED_MODE, CONNECTION_TIMEOUT, DEFAULT_DEVICE_NAME, DEFAULT_NOTIFY_SERVICE_NAME, DOMAIN, KEY_DEVICE_BASIC_INFORMATION, KEY_DEVICE_INFORMATION, KEY_DEVICE_SIGNAL, KEY_DIALUP_MOBILE_DATASWITCH, KEY_LAN_HOST_INFO, KEY_MONITORING_CHECK_NOTIFICATIONS, KEY_MONITORING_MONTH_STATISTICS, KEY_MONITORING_STATUS, KEY_MONITORING_TRAFFIC_STATISTICS, KEY_NET_CURRENT_PLMN, 
KEY_NET_NET_MODE, KEY_SMS_SMS_COUNT, KEY_WLAN_HOST_LIST, KEY_WLAN_WIFI_FEATURE_SWITCH, NOTIFY_SUPPRESS_TIMEOUT, SERVICE_CLEAR_TRAFFIC_STATISTICS, SERVICE_REBOOT, SERVICE_RESUME_INTEGRATION, SERVICE_SUSPEND_INTEGRATION, UPDATE_SIGNAL, ) from .utils import get_device_macs _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = timedelta(seconds=10) NOTIFY_SCHEMA = vol.Any( None, vol.Schema( { vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_RECIPIENT): vol.Any( None, vol.All(cv.ensure_list, [cv.string]) ), } ), ) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.All( cv.ensure_list, [ vol.Schema( { vol.Required(CONF_URL): cv.url, vol.Optional(CONF_USERNAME): cv.string, vol.Optional(CONF_PASSWORD): cv.string, vol.Optional(NOTIFY_DOMAIN): NOTIFY_SCHEMA, } ) ], ) }, extra=vol.ALLOW_EXTRA, ) SERVICE_SCHEMA = vol.Schema({vol.Optional(CONF_URL): cv.url}) PLATFORMS = [ Platform.BINARY_SENSOR, Platform.DEVICE_TRACKER, Platform.SENSOR, Platform.SWITCH, ] @dataclass class Router: """Class for router state.""" hass: HomeAssistant config_entry: ConfigEntry connection: Connection url: str data: dict[str, Any] = field(default_factory=dict, init=False) subscriptions: dict[str, set[str]] = field( default_factory=lambda: defaultdict( set, ((x, {"initial_scan"}) for x in ALL_KEYS) ), init=False, ) inflight_gets: set[str] = field(default_factory=set, init=False) client: Client = field(init=False) suspended: bool = field(default=False, init=False) notify_last_attempt: float = field(default=-1, init=False) def __post_init__(self) -> None: """Set up internal state on init.""" self.client = Client(self.connection) @property def device_name(self) -> str: """Get router device name.""" for key, item in ( (KEY_DEVICE_BASIC_INFORMATION, "devicename"), (KEY_DEVICE_INFORMATION, "DeviceName"), ): with suppress(KeyError, TypeError): return cast(str, self.data[key][item]) return DEFAULT_DEVICE_NAME @property def device_identifiers(self) -> set[tuple[str, str]]: """Get router identifiers for device 
registry.""" assert self.config_entry.unique_id is not None return {(DOMAIN, self.config_entry.unique_id)} @property def device_connections(self) -> set[tuple[str, str]]: """Get router connections for device registry.""" return { (dr.CONNECTION_NETWORK_MAC, x) for x in self.config_entry.data[CONF_MAC] } def _get_data(self, key: str, func: Callable[[], Any]) -> None: if not self.subscriptions.get(key): return if key in self.inflight_gets: _LOGGER.debug("Skipping already inflight get for %s", key) return self.inflight_gets.add(key) _LOGGER.debug("Getting %s for subscribers %s", key, self.subscriptions[key]) try: self.data[key] = func() except ResponseErrorLoginRequiredException: if isinstance(self.connection, AuthorizedConnection): _LOGGER.debug("Trying to authorize again") if self.connection.enforce_authorized_connection(): _LOGGER.debug( "success, %s will be updated by a future periodic run", key, ) else: _LOGGER.debug("failed") return _LOGGER.info( "%s requires authorization, excluding from future updates", key ) self.subscriptions.pop(key) except ResponseErrorException as exc: if not isinstance( exc, ResponseErrorNotSupportedException ) and exc.code not in ( # additional codes treated as unusupported -1, 100006, ): raise _LOGGER.info( "%s apparently not supported by device, excluding from future updates", key, ) self.subscriptions.pop(key) except Timeout: grace_left = ( self.notify_last_attempt - time.monotonic() + NOTIFY_SUPPRESS_TIMEOUT ) if grace_left > 0: _LOGGER.debug( "%s timed out, %.1fs notify timeout suppress grace remaining", key, grace_left, exc_info=True, ) else: raise finally: self.inflight_gets.discard(key) _LOGGER.debug("%s=%s", key, self.data.get(key)) def update(self) -> None: """Update router data.""" if self.suspended: _LOGGER.debug("Integration suspended, not updating data") return self._get_data(KEY_DEVICE_INFORMATION, self.client.device.information) if self.data.get(KEY_DEVICE_INFORMATION): # Full information includes everything in basic 
self.subscriptions.pop(KEY_DEVICE_BASIC_INFORMATION, None) self._get_data( KEY_DEVICE_BASIC_INFORMATION, self.client.device.basic_information ) self._get_data(KEY_DEVICE_SIGNAL, self.client.device.signal) self._get_data( KEY_DIALUP_MOBILE_DATASWITCH, self.client.dial_up.mobile_dataswitch ) self._get_data( KEY_MONITORING_MONTH_STATISTICS, self.client.monitoring.month_statistics ) self._get_data( KEY_MONITORING_CHECK_NOTIFICATIONS, self.client.monitoring.check_notifications, ) self._get_data(KEY_MONITORING_STATUS, self.client.monitoring.status) self._get_data( KEY_MONITORING_TRAFFIC_STATISTICS, self.client.monitoring.traffic_statistics ) self._get_data(KEY_NET_CURRENT_PLMN, self.client.net.current_plmn) self._get_data(KEY_NET_NET_MODE, self.client.net.net_mode) self._get_data(KEY_SMS_SMS_COUNT, self.client.sms.sms_count) self._get_data(KEY_LAN_HOST_INFO, self.client.lan.host_info) if self.data.get(KEY_LAN_HOST_INFO): # LAN host info includes everything in WLAN host list self.subscriptions.pop(KEY_WLAN_HOST_LIST, None) self._get_data(KEY_WLAN_HOST_LIST, self.client.wlan.host_list) self._get_data( KEY_WLAN_WIFI_FEATURE_SWITCH, self.client.wlan.wifi_feature_switch ) dispatcher_send(self.hass, UPDATE_SIGNAL, self.config_entry.unique_id) def logout(self) -> None: """Log out router session.""" if not isinstance(self.connection, AuthorizedConnection): return try: self.client.user.logout() except ResponseErrorNotSupportedException: _LOGGER.debug("Logout not supported by device", exc_info=True) except ResponseErrorLoginRequiredException: _LOGGER.debug("Logout not supported when not logged in", exc_info=True) except Exception: # pylint: disable=broad-except _LOGGER.warning("Logout error", exc_info=True) def cleanup(self, *_: Any) -> None: """Clean up resources.""" self.subscriptions.clear() self.logout() class HuaweiLteData(NamedTuple): """Shared state.""" hass_config: ConfigType # Our YAML config, keyed by router URL config: dict[str, dict[str, Any]] routers: dict[str, 
Router] async def async_setup_entry( # noqa: C901 hass: HomeAssistant, entry: ConfigEntry ) -> bool: """Set up Huawei LTE component from config entry.""" url = entry.data[CONF_URL] # Override settings from YAML config, but only if they're changed in it # Old values are stored as *_from_yaml in the config entry if yaml_config := hass.data[DOMAIN].config.get(url): # Config values new_data = {} for key in CONF_USERNAME, CONF_PASSWORD: if key in yaml_config: value = yaml_config[key] if value != entry.data.get(f"{key}_from_yaml"): new_data[f"{key}_from_yaml"] = value new_data[key] = value # Options new_options = {} yaml_recipient = yaml_config.get(NOTIFY_DOMAIN, {}).get(CONF_RECIPIENT) if yaml_recipient is not None and yaml_recipient != entry.options.get( f"{CONF_RECIPIENT}_from_yaml" ): new_options[f"{CONF_RECIPIENT}_from_yaml"] = yaml_recipient new_options[CONF_RECIPIENT] = yaml_recipient yaml_notify_name = yaml_config.get(NOTIFY_DOMAIN, {}).get(CONF_NAME) if yaml_notify_name is not None and yaml_notify_name != entry.options.get( f"{CONF_NAME}_from_yaml" ): new_options[f"{CONF_NAME}_from_yaml"] = yaml_notify_name new_options[CONF_NAME] = yaml_notify_name # Update entry if overrides were found if new_data or new_options: hass.config_entries.async_update_entry( entry, data={**entry.data, **new_data}, options={**entry.options, **new_options}, ) def get_connection() -> Connection: """Set up a connection.""" if entry.options.get(CONF_UNAUTHENTICATED_MODE): _LOGGER.debug("Connecting in unauthenticated mode, reduced feature set") connection = Connection(url, timeout=CONNECTION_TIMEOUT) else: _LOGGER.debug("Connecting in authenticated mode, full feature set") username = entry.data.get(CONF_USERNAME) or "" password = entry.data.get(CONF_PASSWORD) or "" connection = AuthorizedConnection( url, username=username, password=password, timeout=CONNECTION_TIMEOUT ) return connection try: connection = await hass.async_add_executor_job(get_connection) except Timeout as ex: raise 
ConfigEntryNotReady from ex # Set up router router = Router(hass, entry, connection, url) # Do initial data update await hass.async_add_executor_job(router.update) # Check that we found required information router_info = router.data.get(KEY_DEVICE_INFORMATION) if not entry.unique_id: # Transitional from < 2021.8: update None config entry and entity unique ids if router_info and (serial_number := router_info.get("SerialNumber")): hass.config_entries.async_update_entry(entry, unique_id=serial_number) ent_reg = entity_registry.async_get(hass) for entity_entry in entity_registry.async_entries_for_config_entry( ent_reg, entry.entry_id ): if not entity_entry.unique_id.startswith("None-"): continue new_unique_id = ( f"{serial_number}-{entity_entry.unique_id.split('-', 1)[1]}" ) ent_reg.async_update_entity( entity_entry.entity_id, new_unique_id=new_unique_id ) else: await hass.async_add_executor_job(router.cleanup) msg = ( "Could not resolve serial number to use as unique id for router at %s" ", setup failed" ) if not entry.data.get(CONF_PASSWORD): msg += ( ". Try setting up credentials for the router for one startup, " "unauthenticated mode can be enabled after that in integration " "settings" ) _LOGGER.error(msg, url) return False # Store reference to router hass.data[DOMAIN].routers[entry.unique_id] = router # Clear all subscriptions, enabled entities will push back theirs router.subscriptions.clear() # Update device MAC addresses on record. These can change due to toggling between # authenticated and unauthenticated modes, or likely also when enabling/disabling # SSIDs in the router config. 
try: wlan_settings = await hass.async_add_executor_job( router.client.wlan.multi_basic_settings ) except Exception: # pylint: disable=broad-except # Assume not supported, or authentication required but in unauthenticated mode wlan_settings = {} macs = get_device_macs(router_info or {}, wlan_settings) # Be careful not to overwrite a previous, more complete set with a partial one if macs and (not entry.data[CONF_MAC] or (router_info and wlan_settings)): new_data = dict(entry.data) new_data[CONF_MAC] = macs hass.config_entries.async_update_entry(entry, data=new_data) # Set up device registry if router.device_identifiers or router.device_connections: device_info = DeviceInfo( configuration_url=router.url, connections=router.device_connections, identifiers=router.device_identifiers, name=router.device_name, manufacturer="Huawei", ) hw_version = None sw_version = None if router_info: hw_version = router_info.get("HardwareVersion") sw_version = router_info.get("SoftwareVersion") if router_info.get("DeviceName"): device_info[ATTR_MODEL] = router_info["DeviceName"] if not sw_version and router.data.get(KEY_DEVICE_BASIC_INFORMATION): sw_version = router.data[KEY_DEVICE_BASIC_INFORMATION].get( "SoftwareVersion" ) if hw_version: device_info[ATTR_HW_VERSION] = hw_version if sw_version: device_info[ATTR_SW_VERSION] = sw_version device_registry = dr.async_get(hass) device_registry.async_get_or_create( config_entry_id=entry.entry_id, **device_info, ) # Forward config entry setup to platforms hass.config_entries.async_setup_platforms(entry, PLATFORMS) # Notify doesn't support config entry setup yet, load with discovery for now await discovery.async_load_platform( hass, NOTIFY_DOMAIN, DOMAIN, { ATTR_UNIQUE_ID: entry.unique_id, CONF_NAME: entry.options.get(CONF_NAME, DEFAULT_NOTIFY_SERVICE_NAME), CONF_RECIPIENT: entry.options.get(CONF_RECIPIENT), }, hass.data[DOMAIN].hass_config, ) def _update_router(*_: Any) -> None: """ Update router data. 
Separate passthrough function because lambdas don't work with track_time_interval. """ router.update() # Set up periodic update entry.async_on_unload( async_track_time_interval(hass, _update_router, SCAN_INTERVAL) ) # Clean up at end entry.async_on_unload( hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, router.cleanup) ) return True async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool: """Unload config entry.""" # Forward config entry unload to platforms await hass.config_entries.async_unload_platforms(config_entry, PLATFORMS) # Forget about the router and invoke its cleanup router = hass.data[DOMAIN].routers.pop(config_entry.unique_id) await hass.async_add_executor_job(router.cleanup) return True async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool: """Set up Huawei LTE component.""" # dicttoxml (used by huawei-lte-api) has uselessly verbose INFO level. # https://github.com/quandyfactory/dicttoxml/issues/60 logging.getLogger("dicttoxml").setLevel(logging.WARNING) # Arrange our YAML config to dict with normalized URLs as keys domain_config: dict[str, dict[str, Any]] = {} if DOMAIN not in hass.data: hass.data[DOMAIN] = HuaweiLteData( hass_config=config, config=domain_config, routers={} ) for router_config in config.get(DOMAIN, []): domain_config[url_normalize(router_config.pop(CONF_URL))] = router_config def service_handler(service: ServiceCall) -> None: """ Apply a service. We key this using the router URL instead of its unique id / serial number, because the latter is not available anywhere in the UI. 
""" routers = hass.data[DOMAIN].routers if url := service.data.get(CONF_URL): router = next( (router for router in routers.values() if router.url == url), None ) elif not routers: _LOGGER.error("%s: no routers configured", service.service) return elif len(routers) == 1: router = next(iter(routers.values())) else: _LOGGER.error( "%s: more than one router configured, must specify one of URLs %s", service.service, sorted(router.url for router in routers.values()), ) return if not router: _LOGGER.error("%s: router %s unavailable", service.service, url) return if service.service == SERVICE_CLEAR_TRAFFIC_STATISTICS: if router.suspended: _LOGGER.debug("%s: ignored, integration suspended", service.service) return result = router.client.monitoring.set_clear_traffic() _LOGGER.debug("%s: %s", service.service, result) elif service.service == SERVICE_REBOOT: if router.suspended: _LOGGER.debug("%s: ignored, integration suspended", service.service) return result = router.client.device.reboot() _LOGGER.debug("%s: %s", service.service, result) elif service.service == SERVICE_RESUME_INTEGRATION: # Login will be handled automatically on demand router.suspended = False _LOGGER.debug("%s: %s", service.service, "done") elif service.service == SERVICE_SUSPEND_INTEGRATION: router.logout() router.suspended = True _LOGGER.debug("%s: %s", service.service, "done") else: _LOGGER.error("%s: unsupported service", service.service) for service in ADMIN_SERVICES: hass.helpers.service.async_register_admin_service( DOMAIN, service, service_handler, schema=SERVICE_SCHEMA, ) for url, router_config in domain_config.items(): hass.async_create_task( hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_IMPORT}, data={ CONF_URL: url, CONF_USERNAME: router_config.get(CONF_USERNAME), CONF_PASSWORD: router_config.get(CONF_PASSWORD), }, ) ) return True async def async_migrate_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool: """Migrate config entry to new version.""" if 
config_entry.version == 1: options = dict(config_entry.options) recipient = options.get(CONF_RECIPIENT) if isinstance(recipient, str): options[CONF_RECIPIENT] = [x.strip() for x in recipient.split(",")] config_entry.version = 2 hass.config_entries.async_update_entry(config_entry, options=options) _LOGGER.info("Migrated config entry to version %d", config_entry.version) if config_entry.version == 2: config_entry.version = 3 data = dict(config_entry.data) data[CONF_MAC] = [] hass.config_entries.async_update_entry(config_entry, data=data) _LOGGER.info("Migrated config entry to version %d", config_entry.version) return True @dataclass class HuaweiLteBaseEntity(Entity): """Huawei LTE entity base class.""" router: Router _available: bool = field(default=True, init=False) _unsub_handlers: list[Callable] = field(default_factory=list, init=False) @property def _entity_name(self) -> str: raise NotImplementedError @property def _device_unique_id(self) -> str: """Return unique ID for entity within a router.""" raise NotImplementedError @property def unique_id(self) -> str: """Return unique ID for entity.""" return f"{self.router.config_entry.unique_id}-{self._device_unique_id}" @property def name(self) -> str: """Return entity name.""" return f"Huawei {self.router.device_name} {self._entity_name}" @property def available(self) -> bool: """Return whether the entity is available.""" return self._available @property def should_poll(self) -> bool: """Huawei LTE entities report their state without polling.""" return False async def async_update(self) -> None: """Update state.""" raise NotImplementedError async def async_added_to_hass(self) -> None: """Connect to update signals.""" self._unsub_handlers.append( async_dispatcher_connect(self.hass, UPDATE_SIGNAL, self._async_maybe_update) ) async def _async_maybe_update(self, config_entry_unique_id: str) -> None: """Update state if the update signal comes from our router.""" if config_entry_unique_id == 
self.router.config_entry.unique_id: self.async_schedule_update_ha_state(True) async def async_will_remove_from_hass(self) -> None: """Invoke unsubscription handlers.""" for unsub in self._unsub_handlers: unsub() self._unsub_handlers.clear() class HuaweiLteBaseEntityWithDevice(HuaweiLteBaseEntity): """Base entity with device info.""" @property def device_info(self) -> DeviceInfo: """Get info for matching with parent router.""" return DeviceInfo( connections=self.router.device_connections, identifiers=self.router.device_identifiers, )
# Copyright 2020 The XLS Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""Creates an SMTLIB2 file with an n-bit shift equivalence proof. This file receives an integer from --n and an integer from --chains, and creates an SMTLIB2 file containing an (n)-bit shifter equivalence proof with "chains" nested operations. For example, to create an smt2 file for 4-bit multiplication and 5 nested shift operations, we can run (after building): $ bazel-bin/xls/experimental/smtlib/n_bit_nested_shift_generator \ --n=4 --chains=5 Once an smt2 file is created, we can run: $ <solver command> <filename> The created SMTLIB2 file asserts that the shifter and the builtin shift DO NOT produce the same result, so the output we expect to see is: $ unsat meaning the shifter and the builtin shift never produce different results. They are logically equivalent. """ from xls.common import gfile def description_comments(n, shifts, f): """Writes comments to the top of the output file describing what it does. Write comments to the top of the smt2 file describing the proof it contains: the operation, how many bits in the arguments, and how many operations. Args: n: An integer, the number of bits in each input bitvector. shifts: An integer, the number of nested shift operations. f: The file to write into. """ print( f"""; The following SMT-LIB verifies that a chain of {shifts} {n}-bit shifts ; is equivalent to SMT-LIB's built in bit-vector shift. 
""", file=f) def logic_and_variables(n, shifts, f): """Sets the logic for the smt2 file, and declare/define variables. Write the set-logic for the proof (QF_BV is the bitvector logic), declare the input bitvector variables, and define variables for their indices. Note that x_i_j corresponds to index j of the i-th input bitvector. Args: n: An integer, the number of bits in each input bitvector. shifts: An integer, the number of nested shift operations. f: The file to write into. """ print( """(set-logic QF_BV) ; Declare bit-vectors and proxies for indices""", file=f) for i in range(shifts + 1): print(f"(declare-fun x_{i} () (_ BitVec {n}))", file=f) for j in range(n): print( f"(define-fun x_{i}_{j} () (_ BitVec 1) ((_ extract {j} {j}) x_{i}))", file=f) print("", file=f) def get_shift_bit(char, var): """Returns the var argument if char is '1', otherwise (bvnot var). Args: char: A string, either '0' or '1' var: A string, representing a variable we've defined in the smt2 file. Returns: The value or its inversion. """ if char == "0": return f"(bvnot {var})" elif char == "1": return var else: raise ValueError("Bit string contained value that wasn't 0 or 1") def get_shift_bits(n, bit_string, shift): """Given a bit-string, return an expression to map to the bit-string. Each index of the given bit-string corresponds to the index of an n-bit bit vector. This function returns those bits, using the variable for the bit if the corresponding index in the bit-string is '1', and using the negation of the variable if the index is '0'. Args: n: An integer, the number of bits in the bitvector bit_string: A string of '0's and '1's shift: An integer, the nested shift operation that we're currently at. Returns: The shift op to extract the desired bit. 
""" bits = [] var = f"x_{shift}" if shift == 0 else f"shl_{shift - 1}" for i in range(n - 1, -1, -1): bits.append(get_shift_bit(bit_string[n - i - 1], f"{var}_{i}")) return " ".join(bits) def yield_shift_conjunctions(i, n, shift): """Yields all of the expressions from get_shift_bits, from 0 to i. This function iterates from 0 to i (inclusive), and represents each number in the iteration as an n-bit bit-string. For each of these bit-strings, it yields the bit-vector anding of all of the bits in the bit-vector at the shift we are at, negating bits whose corresponding index in the bit-string have a '0'. Args: i: An integer, the index we iterate up to. n: An integer, the number of bits in the bit-string shift: An integer, the nested shift we're at. """ if n == 1: yield get_shift_bits(n, format(i, f"0{n}b"), shift) else: for step in range(i + 1): bit_string = format(step, f"0{n}b") yield "(bvand " + get_shift_bits(n, bit_string, shift) + ")" def shift_index(i, n, shift, f): """Writes the definition of bit i of the output of shift (shift). Bit i of the output of shift (shift) is defined by what bits in the shifting bitvector are '1' and '0'. For every case, bit i of the output will be equal to a certain bit in the input bitvector that is being shifted. Args: i: An integer, the index we iterate up to. n: An integer, the number of bits in the bit-string shift: An integer, the nested shift we're at. f: The file to write into. """ clauses = [] j = i for conjunction in yield_shift_conjunctions(i, n, shift): clauses.append(f"(bvand x_{shift + 1}_{j} {conjunction})") j -= 1 if i == 0: assignment = " ".join(clauses) else: assignment = f"(bvor {" ".join(clauses)})" print(f"(define-fun shl_{shift}_{i} () (_ BitVec 1) {assignment})", file=f) def concat_shift_indices(n, shift): """Returns the concat expression of the bits at shift (shift). Args: n: An integer, the number of bits in the bit-string shift: An integer, the nested shift we're at. 
""" concats = [f"shl_{shift}_0"] for i in range(1, n): rhs = concats[i - 1] concat = ["(concat", f"shl_{shift}_{i}", rhs + ")"] concats.append(" ".join(concat)) return concats[-1] def shift_level(n, shift, f): """Write the output of shift (shift), concatenating all of its bits together. Args: n: An integer, the number of bits in the bit-string shift: An integer, the nested shift we're at. f: The file to write into. """ for i in range(n): shift_index(i, n, shift, f) print( f"\n(define-fun shl_{shift} () (_ BitVec {n}) {concat_shift_indices(n, shift)})\n", file=f) def get_nested_expression(shifts): """Returns a string representing the addition of all the input bitvectors. Args: shifts: An integer, the number of nested shift operations. """ nested_expressions = [] for i in range(shifts): rhs = "x_0" if i == 0 else nested_expressions[i - 1] expression = ["(bvshl", f"x_{i + 1}", rhs + ")"] nested_expressions.append(" ".join(expression)) return nested_expressions[-1] def assert_and_check_sat(n, shifts, f): """Writes an (unsatisfiable) assertion and tell the solver to check it. Write the assertion that the output of the 'by-hand' shift, shl_(adders - 1), does not equal the output of the builtin bvshl operation, and tell the solver to check the satisfiability. Args: n: An integer, the number of bits in each bitvector. shifts: An integer, the number of nested shift operations. f: The file to write into. """ print( f"""; Compare {n}-bit shift result and internal shift and solve (assert (not (= shl_{shifts - 1} {get_nested_expression(shifts)}))) (check-sat)""", file=f) def n_bit_nested_shift_existing_file(n, shifts, f): """Given a file, write an n-bit shift proof with a chain of (shifts) shifts. Args: n: An integer, the number of bits in each bitvector. shifts: An integer, the number of nested shift operations. f: The file to write into. 
""" description_comments(n, shifts, f) logic_and_variables(n, shifts, f) for shift in range(shifts): shift_level(n, shift, f) assert_and_check_sat(n, shifts, f) def n_bit_nested_shift_new_file(n, shifts): """Makes a new file and writes an n-bit shift proof. Args: n: An integer, the number of bits in each bitvector. shifts: An integer, the number of nested shift operations. """ with gfile.open(f"shift{shifts}_2x{n}.smt2", "w") as f: n_bit_nested_shift_existing_file(n, shifts, f)
# Copyright 2020 The XLS Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""Creates an SMTLIB2 file with an n-bit shift equivalence proof. This file receives an integer from --n and an integer from --chains, and creates an SMTLIB2 file containing an (n)-bit shifter equivalence proof with "chains" nested operations. For example, to create an smt2 file for 4-bit multiplication and 5 nested shift operations, we can run (after building): $ bazel-bin/xls/experimental/smtlib/n_bit_nested_shift_generator \ --n=4 --chains=5 Once an smt2 file is created, we can run: $ <solver command> <filename> The created SMTLIB2 file asserts that the shifter and the builtin shift DO NOT produce the same result, so the output we expect to see is: $ unsat meaning the shifter and the builtin shift never produce different results. They are logically equivalent. """ from xls.common import gfile def description_comments(n, shifts, f): """Writes comments to the top of the output file describing what it does. Write comments to the top of the smt2 file describing the proof it contains: the operation, how many bits in the arguments, and how many operations. Args: n: An integer, the number of bits in each input bitvector. shifts: An integer, the number of nested shift operations. f: The file to write into. """ print( f"""; The following SMT-LIB verifies that a chain of {shifts} {n}-bit shifts ; is equivalent to SMT-LIB's built in bit-vector shift. 
""", file=f) def logic_and_variables(n, shifts, f): """Sets the logic for the smt2 file, and declare/define variables. Write the set-logic for the proof (QF_BV is the bitvector logic), declare the input bitvector variables, and define variables for their indices. Note that x_i_j corresponds to index j of the i-th input bitvector. Args: n: An integer, the number of bits in each input bitvector. shifts: An integer, the number of nested shift operations. f: The file to write into. """ print( """(set-logic QF_BV) ; Declare bit-vectors and proxies for indices""", file=f) for i in range(shifts + 1): print(f"(declare-fun x_{i} () (_ BitVec {n}))", file=f) for j in range(n): print( f"(define-fun x_{i}_{j} () (_ BitVec 1) ((_ extract {j} {j}) x_{i}))", file=f) print("", file=f) def get_shift_bit(char, var): """Returns the var argument if char is '1', otherwise (bvnot var). Args: char: A string, either '0' or '1' var: A string, representing a variable we've defined in the smt2 file. Returns: The value or its inversion. """ if char == "0": return f"(bvnot {var})" elif char == "1": return var else: raise ValueError("Bit string contained value that wasn't 0 or 1") def get_shift_bits(n, bit_string, shift): """Given a bit-string, return an expression to map to the bit-string. Each index of the given bit-string corresponds to the index of an n-bit bit vector. This function returns those bits, using the variable for the bit if the corresponding index in the bit-string is '1', and using the negation of the variable if the index is '0'. Args: n: An integer, the number of bits in the bitvector bit_string: A string of '0's and '1's shift: An integer, the nested shift operation that we're currently at. Returns: The shift op to extract the desired bit. 
""" bits = [] var = f"x_{shift}" if shift == 0 else f"shl_{shift - 1}" for i in range(n - 1, -1, -1): bits.append(get_shift_bit(bit_string[n - i - 1], f"{var}_{i}")) return " ".join(bits) def yield_shift_conjunctions(i, n, shift): """Yields all of the expressions from get_shift_bits, from 0 to i. This function iterates from 0 to i (inclusive), and represents each number in the iteration as an n-bit bit-string. For each of these bit-strings, it yields the bit-vector anding of all of the bits in the bit-vector at the shift we are at, negating bits whose corresponding index in the bit-string have a '0'. Args: i: An integer, the index we iterate up to. n: An integer, the number of bits in the bit-string shift: An integer, the nested shift we're at. """ if n == 1: yield get_shift_bits(n, format(i, f"0{n}b"), shift) else: for step in range(i + 1): bit_string = format(step, f"0{n}b") yield "(bvand " + get_shift_bits(n, bit_string, shift) + ")" def shift_index(i, n, shift, f): """Writes the definition of bit i of the output of shift (shift). Bit i of the output of shift (shift) is defined by what bits in the shifting bitvector are '1' and '0'. For every case, bit i of the output will be equal to a certain bit in the input bitvector that is being shifted. Args: i: An integer, the index we iterate up to. n: An integer, the number of bits in the bit-string shift: An integer, the nested shift we're at. f: The file to write into. """ clauses = [] j = i for conjunction in yield_shift_conjunctions(i, n, shift): clauses.append(f"(bvand x_{shift + 1}_{j} {conjunction})") j -= 1 if i == 0: assignment = " ".join(clauses) else: assignment = f"(bvor {' '.join(clauses)})" print(f"(define-fun shl_{shift}_{i} () (_ BitVec 1) {assignment})", file=f) def concat_shift_indices(n, shift): """Returns the concat expression of the bits at shift (shift). Args: n: An integer, the number of bits in the bit-string shift: An integer, the nested shift we're at. 
""" concats = [f"shl_{shift}_0"] for i in range(1, n): rhs = concats[i - 1] concat = ["(concat", f"shl_{shift}_{i}", rhs + ")"] concats.append(" ".join(concat)) return concats[-1] def shift_level(n, shift, f): """Write the output of shift (shift), concatenating all of its bits together. Args: n: An integer, the number of bits in the bit-string shift: An integer, the nested shift we're at. f: The file to write into. """ for i in range(n): shift_index(i, n, shift, f) print( f"\n(define-fun shl_{shift} () (_ BitVec {n}) {concat_shift_indices(n, shift)})\n", file=f) def get_nested_expression(shifts): """Returns a string representing the addition of all the input bitvectors. Args: shifts: An integer, the number of nested shift operations. """ nested_expressions = [] for i in range(shifts): rhs = "x_0" if i == 0 else nested_expressions[i - 1] expression = ["(bvshl", f"x_{i + 1}", rhs + ")"] nested_expressions.append(" ".join(expression)) return nested_expressions[-1] def assert_and_check_sat(n, shifts, f): """Writes an (unsatisfiable) assertion and tell the solver to check it. Write the assertion that the output of the 'by-hand' shift, shl_(adders - 1), does not equal the output of the builtin bvshl operation, and tell the solver to check the satisfiability. Args: n: An integer, the number of bits in each bitvector. shifts: An integer, the number of nested shift operations. f: The file to write into. """ print( f"""; Compare {n}-bit shift result and internal shift and solve (assert (not (= shl_{shifts - 1} {get_nested_expression(shifts)}))) (check-sat)""", file=f) def n_bit_nested_shift_existing_file(n, shifts, f): """Given a file, write an n-bit shift proof with a chain of (shifts) shifts. Args: n: An integer, the number of bits in each bitvector. shifts: An integer, the number of nested shift operations. f: The file to write into. 
""" description_comments(n, shifts, f) logic_and_variables(n, shifts, f) for shift in range(shifts): shift_level(n, shift, f) assert_and_check_sat(n, shifts, f) def n_bit_nested_shift_new_file(n, shifts): """Makes a new file and writes an n-bit shift proof. Args: n: An integer, the number of bits in each bitvector. shifts: An integer, the number of nested shift operations. """ with gfile.open(f"shift{shifts}_2x{n}.smt2", "w") as f: n_bit_nested_shift_existing_file(n, shifts, f)
#!/usr/bin/env python3 # Copyright (C) 2017-2020 The btclib developers # # This file is part of btclib. It is subject to the license terms in the # LICENSE file found in the top-level directory of this distribution. # # No part of btclib including this file, may be copied, modified, propagated, # or distributed except according to the terms contained in the LICENSE file. """BIP32 Hierarchical Deterministic Wallet functions. A deterministic wallet is a hash-chain of private/public key pairs that derives from a single root, which is the only element requiring backup. Moreover, there are schemes where public keys can be calculated without accessing private keys. A hierarchical deterministic wallet is a tree of multiple hash-chains, derived from a single root, allowing for selective sharing of keypair chains. Here, the HD wallet is implemented according to BIP32 bitcoin standard https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki. A BIP32 extended key is 78 bytes: - [ : 4] version - [ 4: 5] depth in the derivation path - [ 5: 9] parent fingerprint - [ 9:13] index - [13:45] chain code - [45:78] compressed pubkey or [0x00][prvkey] """ import copy import hmac from typing import Iterable, List, Tuple, TypedDict, Union from . 
import bip39, electrum from .alias import INF, Octets, Path, Point, String, XkeyDict from .base58 import b58decode, b58encode from .curvemult import mult from .curves import secp256k1 as ec from .mnemonic import Mnemonic from .network import (_NETWORKS, _P2WPKH_P2SH_PRV_PREFIXES, _P2WPKH_P2SH_PUB_PREFIXES, _P2WPKH_PRV_PREFIXES, _P2WPKH_PUB_PREFIXES, _P2WSH_P2SH_PRV_PREFIXES, _P2WSH_P2SH_PUB_PREFIXES, _P2WSH_PRV_PREFIXES, _P2WSH_PUB_PREFIXES, _PRV_VERSIONS, _PUB_VERSIONS, _REPEATED_NETWORKS, _XPRV_PREFIXES, _XPUB_PREFIXES, MAIN_xprv, MAIN_xpub, MAIN_yprv, MAIN_Yprv, MAIN_ypub, MAIN_Ypub, MAIN_zprv, MAIN_Zprv, MAIN_zpub, MAIN_Zpub, TEST_tprv, TEST_tpub, TEST_uprv, TEST_Uprv, TEST_upub, TEST_Upub, TEST_vprv, TEST_Vprv, TEST_vpub, TEST_Vpub) from .secpoint import bytes_from_point, point_from_octets from .utils import bytes_from_octets, hash160 def _check_version_key(v: bytes, k: bytes) -> None: if v in _PRV_VERSIONS: if k[0] != 0: raise ValueError("prv_version/pubkey mismatch") elif v in _PUB_VERSIONS: if k[0] not in (2, 3): raise ValueError("pub_version/prvkey mismatch") else: raise ValueError(f"unknown extended key version {v!r}") def _check_depth_pfp_index(d: int, pfp: bytes, i: bytes) -> None: if d < 0 or d > 255: raise ValueError(f"Invalid BIP32 depth ({d})") elif d == 0: if pfp != b'\x00\x00\x00\x00': m = f"Zero depth with non-zero parent_fingerprint {pfp!r}" raise ValueError(m) if i != b'\x00\x00\x00\x00': m = f"Zero depth with non-zero index {i!r}" raise ValueError(m) else: if pfp == b'\x00\x00\x00\x00': m = f"Zon-zero depth ({d}) with zero parent_fingerprint {pfp!r}" raise ValueError(m) def deserialize(xkey: Octets) -> XkeyDict: if isinstance(xkey, str): xkey = xkey.strip() xkey = b58decode(xkey, 78) d: XkeyDict = { 'version' : xkey[:4], 'depth' : xkey[4], 'parent_fingerprint' : xkey[5:9], 'index' : xkey[9:13], 'chain_code' : xkey[13:45], 'key' : xkey[45:], # extensions 'q' : 0, # non zero only if xprv 'Q' : INF, # non INF only if xpub 'network' : '' } 
_check_version_key(d['version'], d['key']) _check_depth_pfp_index(d['depth'], d['parent_fingerprint'], d['index']) # calculate d['q'] and d['Q'] if d['key'][0] == 0: q = int.from_bytes(d['key'][1:], byteorder='big') if not 0 < q < ec.n: raise ValueError(f"Private key {hex(q).upper()} not in [1, n-1]") d['q'] = q d['Q'] = INF d['network'] = _REPEATED_NETWORKS[_PRV_VERSIONS.index(d['version'])] else: # must be public (already checked by _check_version_key) d['q'] = 0 d['Q'] = point_from_octets(d['key'], ec) d['network'] = _REPEATED_NETWORKS[_PUB_VERSIONS.index(d['version'])] return d def serialize(d: XkeyDict) -> bytes: if len(d['key']) != 33: m = f"Invalid {len(d["key"])}-bytes BIP32 'key' length" raise ValueError(m) # version length is checked in _check_version_key _check_version_key(d['version'], d['key']) t = d['version'] if len(d['parent_fingerprint']) != 4: m = f"Invalid {len(d["parent_fingerprint"])}-bytes " m += "BIP32 parent_fingerprint length" raise ValueError(m) if len(d['index']) != 4: m = f"Invalid {len(d["index"])}-bytes BIP32 index length" raise ValueError(m) _check_depth_pfp_index(d['depth'], d['parent_fingerprint'], d['index']) t += d['depth'].to_bytes(1, 'big') t += d['parent_fingerprint'] t += d['index'] if len(d['chain_code']) != 32: m = f"Invalid {len(d["chain_code"])}-bytes BIP32 chain_code length" raise ValueError(m) t += d['chain_code'] # already checked in _check_version_key t += d['key'] # d['q'], d['Q'], and d['network'] are just neglected return b58encode(t) def fingerprint(d: Union[XkeyDict, String]) -> bytes: if not isinstance(d, dict): d = deserialize(d) if d['key'][0] == 0: P = mult(d['q']) pubkey = bytes_from_point(P, True, ec) return hash160(pubkey)[:4] # must be a public key return hash160(d['key'])[:4] def rootxprv_from_seed(seed: Octets, version: Octets = MAIN_xprv) -> bytes: """Return BIP32 root master extended private key from seed.""" seed = bytes_from_octets(seed) hd = hmac.digest(b"Bitcoin seed", seed, 'sha512') k = b'\x00' + 
hd[:32] v = bytes_from_octets(version) #if v not in _PRV_VERSIONS: # raise ValueError(f"unknown extended private key version {v!r}") network = _REPEATED_NETWORKS[_PRV_VERSIONS.index(v)] d: XkeyDict = { 'version' : v, 'depth' : 0, 'parent_fingerprint' : b'\x00\x00\x00\x00', 'index' : b'\x00\x00\x00\x00', 'chain_code' : hd[32:], 'key' : k, 'q' : int.from_bytes(hd[:32], byteorder='big'), 'Q' : INF, 'network' : network } return serialize(d) def rootxprv_from_bip39mnemonic(mnemonic: Mnemonic, passphrase: str = "", version: Octets = MAIN_xprv) -> bytes: """Return BIP32 root master extended private key from BIP39 mnemonic.""" seed = bip39.seed_from_mnemonic(mnemonic, passphrase) return rootxprv_from_seed(seed, version) def masterxprv_from_electrummnemonic(mnemonic: Mnemonic, passphrase: str = "", network: str = 'mainnet') -> bytes: """Return BIP32 master extended private key from Electrum mnemonic. Note that for a 'standard' mnemonic the derivation path is "m", for a 'segwit' mnemonic it is "m/0h" instead. """ version, seed = electrum._seed_from_mnemonic(mnemonic, passphrase) prefix = _NETWORKS.index(network) if version == 'standard': xversion = _XPRV_PREFIXES[prefix] return rootxprv_from_seed(seed, xversion) elif version == 'segwit': xversion = _P2WPKH_PRV_PREFIXES[prefix] rootxprv = rootxprv_from_seed(seed, xversion) return derive(rootxprv, 0x80000000) # "m/0h" else: raise ValueError(f"Unmanaged electrum mnemonic version ({version})") def xpub_from_xprv(d: Union[XkeyDict, String]) -> bytes: """Neutered Derivation (ND). Derivation of the extended public key corresponding to an extended private key (“neutered” as it removes the ability to sign transactions). 
""" if isinstance(d, dict): d = copy.copy(d) else: d = deserialize(d) if d['key'][0] != 0: raise ValueError("extended key is not a private one") d['Q'] = mult(d['q']) d['key'] = bytes_from_point(d['Q'], True, ec) d['q'] = 0 d['version'] = _PUB_VERSIONS[_PRV_VERSIONS.index(d['version'])] return serialize(d) def _ckd(d: XkeyDict, index: bytes) -> None: # d is a prvkey if d['key'][0] == 0: d['depth'] += 1 Pbytes = bytes_from_point(mult(d['q']), True, ec) d['parent_fingerprint'] = hash160(Pbytes)[:4] d['index'] = index if index[0] >= 0x80: # hardened derivation h = hmac.digest(d['chain_code'], d['key'] + index, 'sha512') else: # normal derivation h = hmac.digest(d['chain_code'], Pbytes + index, 'sha512') d['chain_code'] = h[32:] offset = int.from_bytes(h[:32], byteorder='big') d['q'] = (d['q'] + offset) % ec.n d['key'] = b'\x00' + d['q'].to_bytes(32, 'big') d['Q'] = INF # d is a pubkey else: if index[0] >= 0x80: raise ValueError("hardened derivation from pubkey is impossible") d['depth'] += 1 d['parent_fingerprint'] = hash160(d['key'])[:4] d['index'] = index h = hmac.digest(d['chain_code'], d['key'] + index, 'sha512') d['chain_code'] = h[32:] offset = int.from_bytes(h[:32], byteorder='big') Offset = mult(offset) d['Q'] = ec.add(d['Q'], Offset) d['key'] = bytes_from_point(d['Q'], True, ec) d['q'] = 0 def _indexes_from_path(path: str) -> Tuple[List[bytes], bool]: steps = path.split('/') if steps[0] in ('m', 'M'): absolute = True elif steps[0] == '.': absolute = False elif steps[0] == '': raise ValueError('Empty derivation path') else: raise ValueError(f'Invalid derivation path root: "{steps[0]}"') if len(steps) > 256: raise ValueError(f'Derivation path depth {len(steps)-1}>255') indexes: List[bytes] = list() for step in steps[1:]: hardened = False if step[-1] in ("'", "H", "h"): hardened = True step = step[:-1] index = int(step) index += 0x80000000 if hardened else 0 indexes.append(index.to_bytes(4, 'big')) return indexes, absolute def derive(d: Union[XkeyDict, String], 
path: Path) -> bytes:
    """Derive an extended key across a path spanning multiple depth levels.

    Derivation is according to:

    - absolute path as "m/44h/0'/1H/0/10" string
    - relative path as "./0/10" string
    - relative path as iterable integer indexes
    - relative one level child derivation with single integer index
    - relative one level child derivation with single 4-bytes index
    """
    if isinstance(d, dict):
        d = copy.copy(d)
    else:
        d = deserialize(d)

    if isinstance(path, str):
        path = path.strip()
        indexes, absolute = _indexes_from_path(path)
        if absolute and d["depth"] != 0:
            msg = "Absolute derivation path for non-root master key"
            raise ValueError(msg)
    elif isinstance(path, int):
        indexes = [path.to_bytes(4, byteorder='big')]
    elif isinstance(path, bytes):
        if len(path) != 4:
            raise ValueError(f"Index must be 4-bytes, not {len(path)}")
        indexes = [path]
    else:
        # iterable of integer indexes
        indexes = [i.to_bytes(4, byteorder='big') for i in path]

    final_depth = d["depth"] + len(indexes)
    if final_depth > 255:
        raise ValueError(f'Derivation path final depth {final_depth}>255')
    for index in indexes:
        _ckd(d, index)
    return serialize(d)


def crack_prvkey(parent_xpub: Union[XkeyDict, String],
                 child_xprv: Union[XkeyDict, String]) -> bytes:
    """Recover the parent xprv from a parent xpub and a non-hardened
    child xprv (the well-known BIP32 parent-key recovery weakness)."""
    if isinstance(parent_xpub, dict):
        p = copy.copy(parent_xpub)
    else:
        p = deserialize(parent_xpub)

    if p['key'][0] not in (2, 3):
        raise ValueError("extended parent key is not a public one")

    if isinstance(child_xprv, dict):
        c = child_xprv
    else:
        c = deserialize(child_xprv)
    if c['key'][0] != 0:
        raise ValueError("extended child key is not a private one")

    # check depth
    if c['depth'] != p['depth'] + 1:
        raise ValueError("not a parent's child: wrong depth relation")
    # check fingerprint
    if c['parent_fingerprint'] != hash160(p['key'])[:4]:
        raise ValueError("not a parent's child: wrong parent fingerprint")
    # check normal derivation
    if c['index'][0] >= 0x80:
        raise ValueError("hardened child derivation")

    p['version'] = c['version']

    h = hmac.digest(p['chain_code'], p['key'] + c['index'], 'sha512')
    offset = int.from_bytes(h[:32], byteorder='big')
    # parent scalar = child scalar - HMAC offset (mod n)
    p['q'] = (c['q'] - offset) % ec.n
    p['key'] = b'\x00' + p['q'].to_bytes(32, byteorder='big')
    p['Q'] = INF
    return serialize(p)
#!/usr/bin/env python3 # Copyright (C) 2017-2020 The btclib developers # # This file is part of btclib. It is subject to the license terms in the # LICENSE file found in the top-level directory of this distribution. # # No part of btclib including this file, may be copied, modified, propagated, # or distributed except according to the terms contained in the LICENSE file. """BIP32 Hierarchical Deterministic Wallet functions. A deterministic wallet is a hash-chain of private/public key pairs that derives from a single root, which is the only element requiring backup. Moreover, there are schemes where public keys can be calculated without accessing private keys. A hierarchical deterministic wallet is a tree of multiple hash-chains, derived from a single root, allowing for selective sharing of keypair chains. Here, the HD wallet is implemented according to BIP32 bitcoin standard https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki. A BIP32 extended key is 78 bytes: - [ : 4] version - [ 4: 5] depth in the derivation path - [ 5: 9] parent fingerprint - [ 9:13] index - [13:45] chain code - [45:78] compressed pubkey or [0x00][prvkey] """ import copy import hmac from typing import Iterable, List, Tuple, TypedDict, Union from . 
import bip39, electrum from .alias import INF, Octets, Path, Point, String, XkeyDict from .base58 import b58decode, b58encode from .curvemult import mult from .curves import secp256k1 as ec from .mnemonic import Mnemonic from .network import (_NETWORKS, _P2WPKH_P2SH_PRV_PREFIXES, _P2WPKH_P2SH_PUB_PREFIXES, _P2WPKH_PRV_PREFIXES, _P2WPKH_PUB_PREFIXES, _P2WSH_P2SH_PRV_PREFIXES, _P2WSH_P2SH_PUB_PREFIXES, _P2WSH_PRV_PREFIXES, _P2WSH_PUB_PREFIXES, _PRV_VERSIONS, _PUB_VERSIONS, _REPEATED_NETWORKS, _XPRV_PREFIXES, _XPUB_PREFIXES, MAIN_xprv, MAIN_xpub, MAIN_yprv, MAIN_Yprv, MAIN_ypub, MAIN_Ypub, MAIN_zprv, MAIN_Zprv, MAIN_zpub, MAIN_Zpub, TEST_tprv, TEST_tpub, TEST_uprv, TEST_Uprv, TEST_upub, TEST_Upub, TEST_vprv, TEST_Vprv, TEST_vpub, TEST_Vpub) from .secpoint import bytes_from_point, point_from_octets from .utils import bytes_from_octets, hash160 def _check_version_key(v: bytes, k: bytes) -> None: if v in _PRV_VERSIONS: if k[0] != 0: raise ValueError("prv_version/pubkey mismatch") elif v in _PUB_VERSIONS: if k[0] not in (2, 3): raise ValueError("pub_version/prvkey mismatch") else: raise ValueError(f"unknown extended key version {v!r}") def _check_depth_pfp_index(d: int, pfp: bytes, i: bytes) -> None: if d < 0 or d > 255: raise ValueError(f"Invalid BIP32 depth ({d})") elif d == 0: if pfp != b'\x00\x00\x00\x00': m = f"Zero depth with non-zero parent_fingerprint {pfp!r}" raise ValueError(m) if i != b'\x00\x00\x00\x00': m = f"Zero depth with non-zero index {i!r}" raise ValueError(m) else: if pfp == b'\x00\x00\x00\x00': m = f"Zon-zero depth ({d}) with zero parent_fingerprint {pfp!r}" raise ValueError(m) def deserialize(xkey: Octets) -> XkeyDict: if isinstance(xkey, str): xkey = xkey.strip() xkey = b58decode(xkey, 78) d: XkeyDict = { 'version' : xkey[:4], 'depth' : xkey[4], 'parent_fingerprint' : xkey[5:9], 'index' : xkey[9:13], 'chain_code' : xkey[13:45], 'key' : xkey[45:], # extensions 'q' : 0, # non zero only if xprv 'Q' : INF, # non INF only if xpub 'network' : '' } 
    # NOTE(review): from here to the end of crack_prvkey this file repeats,
    # verbatim, the bip32 module that already appears earlier in the file.
    _check_version_key(d['version'], d['key'])
    _check_depth_pfp_index(d['depth'], d['parent_fingerprint'], d['index'])

    # calculate d['q'] and d['Q']
    if d['key'][0] == 0:
        q = int.from_bytes(d['key'][1:], byteorder='big')
        if not 0 < q < ec.n:
            raise ValueError(f"Private key {hex(q).upper()} not in [1, n-1]")
        d['q'] = q
        d['Q'] = INF
        d['network'] = _REPEATED_NETWORKS[_PRV_VERSIONS.index(d['version'])]
    else:
        # must be public (already checked by _check_version_key)
        d['q'] = 0
        d['Q'] = point_from_octets(d['key'], ec)
        d['network'] = _REPEATED_NETWORKS[_PUB_VERSIONS.index(d['version'])]
    return d


def serialize(d: XkeyDict) -> bytes:
    """Serialize an extended-key dict to its base58 78-byte encoding."""
    if len(d['key']) != 33:
        m = f"Invalid {len(d['key'])}-bytes BIP32 'key' length"
        raise ValueError(m)
    # version length is checked in _check_version_key
    _check_version_key(d['version'], d['key'])
    t = d['version']

    if len(d['parent_fingerprint']) != 4:
        m = f"Invalid {len(d['parent_fingerprint'])}-bytes "
        m += "BIP32 parent_fingerprint length"
        raise ValueError(m)
    if len(d['index']) != 4:
        m = f"Invalid {len(d['index'])}-bytes BIP32 index length"
        raise ValueError(m)
    _check_depth_pfp_index(d['depth'], d['parent_fingerprint'], d['index'])
    t += d['depth'].to_bytes(1, 'big')
    t += d['parent_fingerprint']
    t += d['index']

    if len(d['chain_code']) != 32:
        m = f"Invalid {len(d['chain_code'])}-bytes BIP32 chain_code length"
        raise ValueError(m)
    t += d['chain_code']

    # already checked in _check_version_key
    t += d['key']

    # d['q'], d['Q'], and d['network'] are just neglected
    return b58encode(t)


def fingerprint(d: Union[XkeyDict, String]) -> bytes:
    """Return the 4-byte fingerprint: HASH160 prefix of the compressed pubkey."""
    if not isinstance(d, dict):
        d = deserialize(d)

    if d['key'][0] == 0:
        P = mult(d['q'])
        pubkey = bytes_from_point(P, True, ec)
        return hash160(pubkey)[:4]

    # must be a public key
    return hash160(d['key'])[:4]


def rootxprv_from_seed(seed: Octets, version: Octets = MAIN_xprv) -> bytes:
    """Return BIP32 root master extended private key from seed."""

    seed = bytes_from_octets(seed)
    hd = hmac.digest(b"Bitcoin seed", seed, 'sha512')
    k = b'\x00' + hd[:32]
    v = bytes_from_octets(version)
    #if v not in _PRV_VERSIONS:
    #    raise ValueError(f"unknown extended private key version {v!r}")
    # the lookup below doubles as the version check (.index raises ValueError)
    network = _REPEATED_NETWORKS[_PRV_VERSIONS.index(v)]
    d: XkeyDict = {
        'version'            : v,
        'depth'              : 0,
        'parent_fingerprint' : b'\x00\x00\x00\x00',
        'index'              : b'\x00\x00\x00\x00',
        'chain_code'         : hd[32:],
        'key'                : k,
        'q'                  : int.from_bytes(hd[:32], byteorder='big'),
        'Q'                  : INF,
        'network'            : network
    }
    return serialize(d)


def rootxprv_from_bip39mnemonic(mnemonic: Mnemonic, passphrase: str = "",
                                version: Octets = MAIN_xprv) -> bytes:
    """Return BIP32 root master extended private key from BIP39 mnemonic."""
    seed = bip39.seed_from_mnemonic(mnemonic, passphrase)
    return rootxprv_from_seed(seed, version)


def masterxprv_from_electrummnemonic(mnemonic: Mnemonic, passphrase: str = "",
                                     network: str = 'mainnet') -> bytes:
    """Return BIP32 master extended private key from Electrum mnemonic.

    Note that for a 'standard' mnemonic the derivation path is "m",
    for a 'segwit' mnemonic it is "m/0h" instead.
    """
    version, seed = electrum._seed_from_mnemonic(mnemonic, passphrase)
    prefix = _NETWORKS.index(network)
    if version == 'standard':
        xversion = _XPRV_PREFIXES[prefix]
        return rootxprv_from_seed(seed, xversion)
    elif version == 'segwit':
        xversion = _P2WPKH_PRV_PREFIXES[prefix]
        rootxprv = rootxprv_from_seed(seed, xversion)
        return derive(rootxprv, 0x80000000)  # "m/0h"
    else:
        raise ValueError(f"Unmanaged electrum mnemonic version ({version})")


def xpub_from_xprv(d: Union[XkeyDict, String]) -> bytes:
    """Neutered Derivation (ND).

    Derivation of the extended public key corresponding to an extended
    private key (“neutered” as it removes the ability to sign transactions).
    """
    if isinstance(d, dict):
        d = copy.copy(d)
    else:
        d = deserialize(d)

    if d['key'][0] != 0:
        raise ValueError("extended key is not a private one")

    d['Q'] = mult(d['q'])
    d['key'] = bytes_from_point(d['Q'], True, ec)
    d['q'] = 0
    d['version'] = _PUB_VERSIONS[_PRV_VERSIONS.index(d['version'])]
    return serialize(d)


def _ckd(d: XkeyDict, index: bytes) -> None:
    """Child Key Derivation: mutate d in place into its child at `index`."""
    # d is a prvkey
    if d['key'][0] == 0:
        d['depth'] += 1
        Pbytes = bytes_from_point(mult(d['q']), True, ec)
        d['parent_fingerprint'] = hash160(Pbytes)[:4]
        d['index'] = index
        if index[0] >= 0x80:  # hardened derivation
            h = hmac.digest(d['chain_code'], d['key'] + index, 'sha512')
        else:                 # normal derivation
            h = hmac.digest(d['chain_code'], Pbytes + index, 'sha512')
        d['chain_code'] = h[32:]
        offset = int.from_bytes(h[:32], byteorder='big')
        d['q'] = (d['q'] + offset) % ec.n
        d['key'] = b'\x00' + d['q'].to_bytes(32, 'big')
        d['Q'] = INF
    # d is a pubkey
    else:
        if index[0] >= 0x80:
            raise ValueError("hardened derivation from pubkey is impossible")
        d['depth'] += 1
        d['parent_fingerprint'] = hash160(d['key'])[:4]
        d['index'] = index
        h = hmac.digest(d['chain_code'], d['key'] + index, 'sha512')
        d['chain_code'] = h[32:]
        offset = int.from_bytes(h[:32], byteorder='big')
        Offset = mult(offset)
        d['Q'] = ec.add(d['Q'], Offset)
        d['key'] = bytes_from_point(d['Q'], True, ec)
        d['q'] = 0


def _indexes_from_path(path: str) -> Tuple[List[bytes], bool]:
    """Parse a derivation path into 4-byte indexes plus an absolute flag."""
    steps = path.split('/')
    if steps[0] in ('m', 'M'):
        absolute = True
    elif steps[0] == '.':
        absolute = False
    elif steps[0] == '':
        raise ValueError('Empty derivation path')
    else:
        raise ValueError(f'Invalid derivation path root: "{steps[0]}"')
    if len(steps) > 256:
        raise ValueError(f'Derivation path depth {len(steps)-1}>255')

    indexes: List[bytes] = list()
    for step in steps[1:]:
        hardened = False
        if step[-1] in ("'", "H", "h"):
            hardened = True
            step = step[:-1]
        index = int(step)
        index += 0x80000000 if hardened else 0
        indexes.append(index.to_bytes(4, 'big'))
    return indexes, absolute


def derive(d: Union[XkeyDict, String], path: Path) -> bytes:
    """Derive an extended key across a path spanning multiple depth levels.

    Derivation is according to:

    - absolute path as "m/44h/0'/1H/0/10" string
    - relative path as "./0/10" string
    - relative path as iterable integer indexes
    - relative one level child derivation with single integer index
    - relative one level child derivation with single 4-bytes index
    """
    if isinstance(d, dict):
        d = copy.copy(d)
    else:
        d = deserialize(d)

    if isinstance(path, str):
        path = path.strip()
        indexes, absolute = _indexes_from_path(path)
        if absolute and d["depth"] != 0:
            msg = "Absolute derivation path for non-root master key"
            raise ValueError(msg)
    elif isinstance(path, int):
        indexes = [path.to_bytes(4, byteorder='big')]
    elif isinstance(path, bytes):
        if len(path) != 4:
            raise ValueError(f"Index must be 4-bytes, not {len(path)}")
        indexes = [path]
    else:
        # iterable of integer indexes
        indexes = [i.to_bytes(4, byteorder='big') for i in path]

    final_depth = d["depth"] + len(indexes)
    if final_depth > 255:
        raise ValueError(f'Derivation path final depth {final_depth}>255')
    for index in indexes:
        _ckd(d, index)
    return serialize(d)


def crack_prvkey(parent_xpub: Union[XkeyDict, String],
                 child_xprv: Union[XkeyDict, String]) -> bytes:
    """Recover the parent xprv from a parent xpub and a non-hardened
    child xprv (the well-known BIP32 parent-key recovery weakness)."""
    if isinstance(parent_xpub, dict):
        p = copy.copy(parent_xpub)
    else:
        p = deserialize(parent_xpub)

    if p['key'][0] not in (2, 3):
        raise ValueError("extended parent key is not a public one")

    if isinstance(child_xprv, dict):
        c = child_xprv
    else:
        c = deserialize(child_xprv)
    if c['key'][0] != 0:
        raise ValueError("extended child key is not a private one")

    # check depth
    if c['depth'] != p['depth'] + 1:
        raise ValueError("not a parent's child: wrong depth relation")
    # check fingerprint
    if c['parent_fingerprint'] != hash160(p['key'])[:4]:
        raise ValueError("not a parent's child: wrong parent fingerprint")
    # check normal derivation
    if c['index'][0] >= 0x80:
        raise ValueError("hardened child derivation")

    p['version'] = c['version']

    h = hmac.digest(p['chain_code'], p['key'] + c['index'], 'sha512')
    offset = int.from_bytes(h[:32], byteorder='big')
    # parent scalar = child scalar - HMAC offset (mod n)
    p['q'] = (c['q'] - offset) % ec.n
    p['key'] = b'\x00' + p['q'].to_bytes(32, byteorder='big')
    p['Q'] = INF
    return serialize(p)
import argparse, time, re, asyncio, functools, base64, random, urllib.parse, socket from . import proto from .__doc__ import * SOCKET_TIMEOUT = 300 PACKET_SIZE = 65536 UDP_LIMIT = 30 DUMMY = lambda s: s asyncio.StreamReader.read_ = lambda self: self.read(PACKET_SIZE) asyncio.StreamReader.read_n = lambda self, n: asyncio.wait_for(self.readexactly(n), timeout=SOCKET_TIMEOUT) asyncio.StreamReader.read_until = lambda self, s: asyncio.wait_for(self.readuntil(s), timeout=SOCKET_TIMEOUT) class AuthTable(object): _auth = {} def __init__(self, remote_ip, authtime): self.remote_ip = remote_ip self.authtime = authtime def authed(self): return time.time() - self._auth.get(self.remote_ip, 0) <= self.authtime def set_authed(self): self._auth[self.remote_ip] = time.time() async def prepare_ciphers(cipher, reader, writer, bind=None, server_side=True): if cipher: cipher.pdecrypt = cipher.pdecrypt2 = cipher.pencrypt = cipher.pencrypt2 = DUMMY for plugin in cipher.plugins: if server_side: await plugin.init_server_data(reader, writer, cipher, bind) else: await plugin.init_client_data(reader, writer, cipher) plugin.add_cipher(cipher) return cipher(reader, writer, cipher.pdecrypt, cipher.pdecrypt2, cipher.pencrypt, cipher.pencrypt2) else: return None, None def schedule(rserver, salgorithm, host_name, port): filter_cond = lambda o: o.alive and (not o.match or o.match(host_name) or o.match(str(port))) if salgorithm == 'fa': return next(filter(filter_cond, rserver), None) elif salgorithm == 'rr': for i, roption in enumerate(rserver): if filter_cond(roption): rserver.append(rserver.pop(i)) return roption elif salgorithm == 'rc': filters = [i for i in rserver if filter_cond(i)] return random.choice(filters) if filters else None elif salgorithm == 'lc': return min(filter(filter_cond, rserver), default=None, key=lambda i: i.total) else: raise Exception('Unknown scheduling algorithm') #Unreachable async def stream_handler(reader, writer, unix, lbind, protos, rserver, cipher, sslserver, 
authtime=86400*30, block=None, salgorithm='fa', verbose=DUMMY, modstat=lambda r,h:lambda i:DUMMY, **kwargs): try: reader, writer = proto.sslwrap(reader, writer, sslserver, True, None, verbose) if unix: remote_ip, server_ip, remote_text = 'local', None, 'unix_local' else: remote_ip, remote_port, *_ = writer.get_extra_info('peername') server_ip = writer.get_extra_info('sockname')[0] remote_text = f'{remote_ip}:{remote_port}' local_addr = None if server_ip in ('127.0.0.1', '::1', None) else (server_ip, 0) reader_cipher, _ = await prepare_ciphers(cipher, reader, writer, server_side=False) lproto, host_name, port, lbuf, rbuf = await proto.parse(protos, reader=reader, writer=writer, authtable=AuthTable(remote_ip, authtime), reader_cipher=reader_cipher, sock=writer.get_extra_info('socket'), **kwargs) if host_name == 'echo': asyncio.ensure_future(lproto.channel(reader, writer, DUMMY, DUMMY)) elif host_name == 'empty': asyncio.ensure_future(lproto.channel(reader, writer, None, DUMMY)) elif block and block(host_name): raise Exception('BLOCK ' + host_name) else: roption = schedule(rserver, salgorithm, host_name, port) or ProxyURI.DIRECT verbose(f'{lproto.name} {remote_text}{roption.logtext(host_name, port)}') try: reader_remote, writer_remote = await roption.open_connection(host_name, port, local_addr, lbind) except asyncio.TimeoutError: raise Exception(f'Connection timeout {roption.bind}') try: reader_remote, writer_remote = await roption.prepare_connection(reader_remote, writer_remote, host_name, port) writer.write(lbuf) writer_remote.write(rbuf) except Exception: writer_remote.close() raise Exception('Unknown remote protocol') m = modstat(remote_ip, host_name) lchannel = lproto.http_channel if rbuf else lproto.channel asyncio.ensure_future(lproto.channel(reader_remote, writer, m(2+roption.direct), m(4+roption.direct))) asyncio.ensure_future(lchannel(reader, writer_remote, m(roption.direct), roption.connection_change)) except Exception as ex: if not isinstance(ex, 
asyncio.TimeoutError) and not str(ex).startswith('Connection closed'): verbose(f'{str(ex) or 'Unsupported protocol'} from {remote_ip}') try: writer.close() except Exception: pass async def reuse_stream_handler(reader, writer, unix, lbind, protos, rserver, urserver, block, cipher, salgorithm, verbose=DUMMY, modstat=lambda r,h:lambda i:DUMMY, **kwargs): try: if unix: remote_ip, server_ip, remote_text = 'local', None, 'unix_local' else: remote_ip, remote_port, *_ = writer.get_extra_info('peername') server_ip = writer.get_extra_info('sockname')[0] remote_text = f'{remote_ip}:{remote_port}' local_addr = None if server_ip in ('127.0.0.1', '::1', None) else (server_ip, 0) reader_cipher, _ = await prepare_ciphers(cipher, reader, writer, server_side=False) lproto = protos[0] except Exception as ex: verbose(f'{str(ex) or 'Unsupported protocol'} from {remote_ip}') async def tcp_handler(reader, writer, host_name, port): try: if block and block(host_name): raise Exception('BLOCK ' + host_name) roption = schedule(rserver, salgorithm, host_name, port) or ProxyURI.DIRECT verbose(f'{lproto.name} {remote_text}{roption.logtext(host_name, port)}') try: reader_remote, writer_remote = await roption.open_connection(host_name, port, local_addr, lbind) except asyncio.TimeoutError: raise Exception(f'Connection timeout {roption.bind}') try: reader_remote, writer_remote = await roption.prepare_connection(reader_remote, writer_remote, host_name, port) except Exception: writer_remote.close() raise Exception('Unknown remote protocol') m = modstat(remote_ip, host_name) asyncio.ensure_future(lproto.channel(reader_remote, writer, m(2+roption.direct), m(4+roption.direct))) asyncio.ensure_future(lproto.channel(reader, writer_remote, m(roption.direct), roption.connection_change)) except Exception as ex: if not isinstance(ex, asyncio.TimeoutError) and not str(ex).startswith('Connection closed'): verbose(f'{str(ex) or 'Unsupported protocol'} from {remote_ip}') try: writer.close() except Exception: pass 
async def udp_handler(sendto, data, host_name, port, sid): try: if block and block(host_name): raise Exception('BLOCK ' + host_name) roption = schedule(urserver, salgorithm, host_name, port) or ProxyURI.DIRECT verbose(f'UDP {lproto.name} {remote_text}{roption.logtext(host_name, port)}') data = roption.prepare_udp_connection(host_name, port, data) await roption.open_udp_connection(host_name, port, data, sid, sendto) except Exception as ex: if not str(ex).startswith('Connection closed'): verbose(f'{str(ex) or 'Unsupported protocol'} from {remote_ip}') lproto.get_handler(reader, writer, verbose, tcp_handler, udp_handler) async def datagram_handler(writer, data, addr, protos, urserver, block, cipher, salgorithm, verbose=DUMMY, **kwargs): try: remote_ip, remote_port, *_ = addr remote_text = f'{remote_ip}:{remote_port}' data = cipher.datagram.decrypt(data) if cipher else data lproto, host_name, port, data = proto.udp_parse(protos, data, sock=writer.get_extra_info('socket'), **kwargs) if host_name == 'echo': writer.sendto(data, addr) elif host_name == 'empty': pass elif block and block(host_name): raise Exception('BLOCK ' + host_name) else: roption = schedule(urserver, salgorithm, host_name, port) or ProxyURI.DIRECT verbose(f'UDP {lproto.name} {remote_text}{roption.logtext(host_name, port)}') data = roption.prepare_udp_connection(host_name, port, data) def reply(rdata): rdata = lproto.udp_client2(host_name, port, rdata) writer.sendto(cipher.datagram.encrypt(rdata) if cipher else rdata, addr) await roption.open_udp_connection(host_name, port, data, addr, reply) except Exception as ex: if not str(ex).startswith('Connection closed'): verbose(f'{str(ex) or 'Unsupported protocol'} from {remote_ip}') async def check_server_alive(interval, rserver, verbose): while True: await asyncio.sleep(interval) for remote in rserver: if remote.direct: continue try: _, writer = await remote.open_connection(None, None, None, None, timeout=3) except asyncio.CancelledError as ex: return except 
Exception as ex: if remote.alive: verbose(f'{remote.rproto.name} {remote.bind} -> OFFLINE') remote.alive = False continue if not remote.alive: verbose(f'{remote.rproto.name} {remote.bind} -> ONLINE') remote.alive = True try: if remote.backward: writer.write(b'\x00') writer.close() except Exception: pass class BackwardConnection(object): def __init__(self, uri, count): self.uri = uri self.count = count self.closed = False self.conn = asyncio.Queue() async def open_connection(self): while True: reader, writer = await self.conn.get() if not writer.transport.is_closing(): return reader, writer def close(self): self.closed = True try: self.writer.close() except Exception: pass async def start_server(self, handler): for _ in range(self.count): asyncio.ensure_future(self.server_run(handler)) return self async def server_run(self, handler): errwait = 0 while not self.closed: if self.uri.unix: wait = asyncio.open_unix_connection(path=self.uri.bind) else: wait = asyncio.open_connection(host=self.uri.host_name, port=self.uri.port, local_addr=(self.uri.lbind, 0) if self.uri.lbind else None) try: reader, writer = await asyncio.wait_for(wait, timeout=SOCKET_TIMEOUT) writer.write(self.uri.auth) self.writer = writer try: data = await reader.read_n(1) except asyncio.TimeoutError: data = None if data and data[0] != 0: reader._buffer[0:0] = data asyncio.ensure_future(handler(reader, writer)) else: writer.close() errwait = 0 except Exception as ex: try: writer.close() except Exception: pass if not self.closed: await asyncio.sleep(errwait) errwait = min(errwait*1.3 + 0.1, 30) def client_run(self, args): async def handler(reader, writer): if self.uri.auth: try: assert self.uri.auth == (await reader.read_n(len(self.uri.auth))) except Exception: return await self.conn.put((reader, writer)) if self.uri.unix: return asyncio.start_unix_server(handler, path=self.uri.bind) else: return asyncio.start_server(handler, host=self.uri.host_name, port=self.uri.port, reuse_port=args.get('ruport')) 
class ProxyURI(object):
    """A parsed proxy endpoint (listener or remote), built by `compile`.

    All keyword fields from `compile` land directly on the instance via
    __dict__.update; `relay` chains endpoints for multi-hop forwarding.
    """
    def __init__(self, **kw):
        self.__dict__.update(kw)
        self.total = 0          # live-connection counter for 'lc' scheduling
        self.udpmap = {}        # addr -> DatagramProtocol for UDP sessions
        self.handler = None
        self.streams = None
        if self.backward:
            self.backward = BackwardConnection(self, self.backward)
    def logtext(self, host, port):
        # FIX: both non-direct branches used same-quote nesting inside
        # f-strings (f' ->{(' ssl' …)}'), a SyntaxError before Python 3.12
        # (PEP 701); normalized to inner double quotes as in the duplicate
        # copy of this module.
        if self.direct:
            return f' -> {host}:{port}'
        elif self.tunnel:
            return f' ->{(" ssl" if self.sslclient else "")} {self.bind}'
        else:
            return f' -> {self.rproto.name+("+ssl" if self.sslclient else "")} {self.bind}' + self.relay.logtext(host, port)
    def connection_change(self, delta):
        self.total += delta
    async def open_udp_connection(self, host, port, data, addr, reply):
        """Send `data` for session `addr`, creating a datagram endpoint on demand.

        At most UDP_LIMIT sessions are kept; the least-recently-active one is
        evicted when the map overflows. `reply` is called with each answer.
        """
        class Protocol(asyncio.DatagramProtocol):
            # NOTE: `prot` is deliberately used instead of `self` so the
            # enclosing ProxyURI instance stays reachable as `self`.
            def __init__(prot, data):
                self.udpmap[addr] = prot
                prot.databuf = [data]
                prot.transport = None
                prot.update = 0
            def connection_made(prot, transport):
                prot.transport = transport
                for data in prot.databuf:
                    transport.sendto(data)
                prot.databuf.clear()
                prot.update = time.perf_counter()
            def new_data_arrived(prot, data):
                if prot.transport:
                    prot.transport.sendto(data)
                else:
                    prot.databuf.append(data)   # endpoint not up yet: buffer
                prot.update = time.perf_counter()
            def datagram_received(prot, data, addr):
                data = self.cipher.datagram.decrypt(data) if self.cipher else data
                data = self.rproto.udp_client(data) if not self.direct else data
                reply(data)
                prot.update = time.perf_counter()
            def connection_lost(prot, exc):
                self.udpmap.pop(addr, None)
        if addr in self.udpmap:
            self.udpmap[addr].new_data_arrived(data)
        else:
            if self.direct and host == 'tunnel':
                raise Exception('Unknown tunnel endpoint')
            self.connection_change(1)
            if len(self.udpmap) > UDP_LIMIT:
                # Evict the stalest session to bound resource usage.
                min_addr = min(self.udpmap, key=lambda x: self.udpmap[x].update)
                prot = self.udpmap.pop(min_addr)
                if prot.transport:
                    prot.transport.close()
            prot = Protocol(data)
            remote_addr = (host, port) if self.direct else (self.host_name, self.port)
            await asyncio.get_event_loop().create_datagram_endpoint(lambda: prot, remote_addr=remote_addr)
    def prepare_udp_connection(self, host, port, data):
        """Wrap `data` in this hop's UDP framing (relay first, then cipher)."""
        if not self.direct:
            data = self.relay.prepare_udp_connection(host, port, data)
            whost, wport = (host, port) if self.relay.direct else (self.relay.host_name, self.relay.port)
            data = self.rproto.udp_connect(rauth=self.auth, host_name=whost, port=wport, data=data)
            if self.cipher:
                data = self.cipher.datagram.encrypt(data)
        return data
    def start_udp_server(self, args):
        """Bind a UDP listener that dispatches every packet to datagram_handler."""
        class Protocol(asyncio.DatagramProtocol):
            def connection_made(prot, transport):
                prot.transport = transport
            def datagram_received(prot, data, addr):
                asyncio.ensure_future(datagram_handler(prot.transport, data, addr, **vars(self), **args))
        return asyncio.get_event_loop().create_datagram_endpoint(Protocol, local_addr=(self.host_name, self.port))
    async def open_connection(self, host, port, local_addr, lbind, timeout=SOCKET_TIMEOUT):
        """Open the TCP (or ssh/unix/backward) stream toward this endpoint.

        For reusable ('pack') and ssh endpoints a single shared stream is
        memoized in `self.streams`; concurrent callers await the same future.
        """
        if self.reuse or self.ssh:
            if self.streams is None or self.streams.done() and (self.reuse and not self.handler):
                self.streams = asyncio.get_event_loop().create_future()
            else:
                if not self.streams.done():
                    await self.streams
                return self.streams.result()
        try:
            # Resolve the effective bind address: per-endpoint lbind wins,
            # then the caller's lbind; the literal 'in' means "inherit".
            local_addr = local_addr if self.lbind == 'in' else (self.lbind, 0) if self.lbind else \
                         local_addr if lbind == 'in' else (lbind, 0) if lbind else None
            family = 0 if local_addr is None else socket.AF_INET6 if ':' in local_addr[0] else socket.AF_INET
            if self.direct:
                if host == 'tunnel':
                    raise Exception('Unknown tunnel endpoint')
                wait = asyncio.open_connection(host=host, port=port, local_addr=local_addr, family=family)
            elif self.ssh:
                try:
                    import asyncssh
                    # Graft the module's StreamReader helpers onto SSHReader so
                    # downstream code can use read_/read_n/read_until uniformly.
                    for s in ('read_', 'read_n', 'read_until'):
                        setattr(asyncssh.SSHReader, s, getattr(asyncio.StreamReader, s))
                except Exception:
                    raise Exception('Missing library: "pip3 install asyncssh"')
                username, password = self.auth.decode().split(':', 1)
                if password.startswith(':'):
                    # "user::keyfile" selects key auth instead of a password.
                    client_keys = [password[1:]]
                    password = None
                else:
                    client_keys = None
                conn = await asyncssh.connect(host=self.host_name, port=self.port, local_addr=local_addr,
                                              family=family, x509_trusted_certs=None, known_hosts=None,
                                              username=username, password=password,
                                              client_keys=client_keys, keepalive_interval=60)
                if not self.streams.done():
                    self.streams.set_result((conn, None))
                return conn, None
            elif self.backward:
                wait = self.backward.open_connection()
            elif self.unix:
                wait = asyncio.open_unix_connection(path=self.bind)
            else:
                wait = asyncio.open_connection(host=self.host_name, port=self.port, local_addr=local_addr, family=family)
            reader, writer = await asyncio.wait_for(wait, timeout=timeout)
        except Exception as ex:
            if self.reuse:
                self.streams.set_exception(ex)
                self.streams = None
            raise
        return reader, writer
    def prepare_connection(self, reader_remote, writer_remote, host, port):
        if self.reuse and not self.handler:
            self.handler = self.rproto.get_handler(reader_remote, writer_remote, DUMMY)
        return self.prepare_ciphers_and_headers(reader_remote, writer_remote, host, port, self.handler)
    async def prepare_ciphers_and_headers(self, reader_remote, writer_remote, host, port, handler):
        """Negotiate SSL, cipher and protocol headers hop by hop down `relay`."""
        if not self.direct:
            reader_remote, writer_remote = proto.sslwrap(reader_remote, writer_remote, self.sslclient, False, self.host_name)
            if not handler or not handler.ready:
                _, writer_cipher_r = await prepare_ciphers(self.cipher, reader_remote, writer_remote, self.bind)
            else:
                writer_cipher_r = None
            whost, wport = (host, port) if self.relay.direct else (self.relay.host_name, self.relay.port)
            if self.rproto.reuse():
                if not self.streams.done():
                    self.streams.set_result((reader_remote, writer_remote))
                reader_remote, writer_remote = handler.connect(whost, wport)
            elif self.ssh:
                reader_remote, writer_remote = await reader_remote.open_connection(whost, wport)
            else:
                await self.rproto.connect(reader_remote=reader_remote, writer_remote=writer_remote,
                                          rauth=self.auth, host_name=whost, port=wport,
                                          writer_cipher_r=writer_cipher_r, myhost=self.host_name,
                                          sock=writer_remote.get_extra_info('socket'))
            return await self.relay.prepare_ciphers_and_headers(reader_remote, writer_remote, host, port, handler)
        return reader_remote, writer_remote
    def start_server(self, args):
        """Start the TCP listener for this endpoint (unix/backward/plain)."""
        handler = functools.partial(reuse_stream_handler if self.reuse else stream_handler, **vars(self), **args)
        if self.backward:
            return self.backward.start_server(handler)
        elif self.unix:
            return asyncio.start_unix_server(handler, path=self.bind)
        else:
            return asyncio.start_server(handler, host=self.host_name, port=self.port, reuse_port=args.get('ruport'))
    async def tcp_connect(self, host, port, local_addr=None, lbind=None):
        """Open + negotiate a TCP stream to (host, port) through this proxy."""
        reader, writer = await self.open_connection(host, port, local_addr, lbind)
        try:
            reader, writer = await self.prepare_connection(reader, writer, host, port)
        except Exception:
            writer.close()
            raise
        return reader, writer
    async def udp_sendto(self, host, port, data, answer_cb, local_addr=None):
        """Send one UDP payload through this proxy; `answer_cb` gets replies."""
        if local_addr is None:
            # A random session id stands in for a bound local address.
            local_addr = random.randrange(2**32)
        data = self.prepare_udp_connection(host, port, data)
        await self.open_udp_connection(host, port, data, local_addr, answer_cb)
    @classmethod
    def compile_rule(cls, filename):
        """Build a matcher from an inline /{regex}/ or a file of patterns."""
        if filename.startswith("{") and filename.endswith("}"):
            return re.compile(filename[1:-1]).match
        with open(filename) as f:
            return re.compile('(:?'+''.join('|'.join(i.strip() for i in f if i.strip() and not i.startswith('#')))+')$').match
    @classmethod
    def compile_relay(cls, uri):
        """Compile a '__'-separated chain of URIs into linked ProxyURIs."""
        tail = cls.DIRECT
        for urip in reversed(uri.split('__')):
            tail = cls.compile(urip, tail)
        return tail
    @classmethod
    def compile(cls, uri, relay=None):
        """Parse one proxy URI (scheme://[cipher@]host:port[,plugins][?rules][#auth])."""
        scheme, _, uri = uri.partition('://')
        url = urllib.parse.urlparse('s://'+uri)
        rawprotos = scheme.split('+')
        err_str, protos = proto.get_protos(rawprotos)
        if err_str:
            raise argparse.ArgumentTypeError(err_str)
        if 'ssl' in rawprotos or 'secure' in rawprotos:
            import ssl
            sslserver = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            sslclient = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
            if 'ssl' in rawprotos:
                # 'ssl' (unlike 'secure') skips certificate verification.
                sslclient.check_hostname = False
                sslclient.verify_mode = ssl.CERT_NONE
        else:
            sslserver = sslclient = None
        protonames = [i.name for i in protos]
        if 'pack' in protonames and relay and relay != cls.DIRECT:
            raise argparse.ArgumentTypeError('pack protocol cannot relay to other proxy')
        urlpath, _, plugins = url.path.partition(',')
        urlpath, _, lbind = urlpath.partition('@')
        plugins = plugins.split(',') if plugins else None
        cipher, _, loc = url.netloc.rpartition('@')
        if cipher:
            from .cipher import get_cipher
            if ':' not in cipher:
                try:
                    cipher = base64.b64decode(cipher).decode()
                except Exception:
                    pass
            if ':' not in cipher:
                raise argparse.ArgumentTypeError('userinfo must be "cipher:key"')
            err_str, cipher = get_cipher(cipher)
            if err_str:
                raise argparse.ArgumentTypeError(err_str)
            if plugins:
                from .plugin import get_plugin
                for name in plugins:
                    if not name:
                        continue
                    err_str, plugin = get_plugin(name)
                    if err_str:
                        raise argparse.ArgumentTypeError(err_str)
                    cipher.plugins.append(plugin)
        match = cls.compile_rule(url.query) if url.query else None
        if loc:
            host_name, _, port = loc.partition(':')
            port = int(port) if port else (22 if 'ssh' in rawprotos else 8080)
        else:
            host_name = port = None
        return ProxyURI(protos=protos, rproto=protos[0], cipher=cipher, auth=url.fragment.encode(),
                        match=match, bind=loc or urlpath, host_name=host_name, port=port,
                        unix=not loc, lbind=lbind, sslclient=sslclient, sslserver=sslserver,
                        alive=True, direct='direct' in protonames, tunnel='tunnel' in protonames,
                        reuse='pack' in protonames or relay and relay.reuse, backward=rawprotos.count('in'),
                        ssh='ssh' in rawprotos, relay=relay)
ProxyURI.DIRECT = ProxyURI(direct=True, tunnel=False, reuse=False, relay=None, alive=True,
                           match=None, cipher=None, backward=None, ssh=None, lbind=None)

async def test_url(url, rserver):
    """Fetch `url` once through every remote in `rserver` and print the result."""
    url = urllib.parse.urlparse(url)
    assert url.scheme in ('http', 'https'), f'Unknown scheme {url.scheme}'
    host_name, _, port = url.netloc.partition(':')
    port = int(port) if port else 80 if url.scheme == 'http' else 443
    # FIX: was f'GET {url.path or '/'} …' — same-quote nesting, SyntaxError
    # before Python 3.12 (PEP 701).
    initbuf = f'GET {url.path or "/"} HTTP/1.1\r\nHost: {host_name}\r\nUser-Agent: pproxy-{__version__}\r\nAccept: */*\r\nConnection: close\r\n\r\n'.encode()
    for roption in rserver:
        print(f'============ {roption.bind} ============')
        try:
            reader, writer = await roption.open_connection(host_name, port, None, None)
        except asyncio.TimeoutError:
            raise Exception(f'Connection timeout {rserver}')
        try:
            reader, writer = await roption.prepare_connection(reader, writer, host_name, port)
        except Exception:
            writer.close()
            raise Exception('Unknown remote protocol')
        if url.scheme == 'https':
            import ssl
            sslclient = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
            sslclient.check_hostname = False
            sslclient.verify_mode = ssl.CERT_NONE
            reader, writer = proto.sslwrap(reader, writer, sslclient, False, host_name)
        writer.write(initbuf)
        headers = await reader.read_until(b'\r\n\r\n')
        print(headers.decode()[:-4])
        print(f'--------------------------------')
        body = bytearray()
        while 1:
            s = await reader.read_()
            if not s:
                break
            body.extend(s)
        print(body.decode('utf8', 'ignore'))
        print(f'============ success ============')

def main():
    """Command-line entry point: parse args, start listeners, run the loop."""
    parser = argparse.ArgumentParser(description=__description__+'\nSupported protocols: http,socks4,socks5,shadowsocks,shadowsocksr,redirect,pf,tunnel', epilog=f'Online help: <{__url__}>')
    parser.add_argument('-l', dest='listen', default=[], action='append', type=ProxyURI.compile, help='tcp server uri (default: http+socks4+socks5://:8080/)')
    parser.add_argument('-r', dest='rserver', default=[], action='append', type=ProxyURI.compile_relay, help='tcp remote server uri (default: direct)')
    parser.add_argument('-ul', dest='ulisten', default=[], action='append', type=ProxyURI.compile, help='udp server setting uri (default: none)')
    parser.add_argument('-ur', dest='urserver', default=[], action='append', type=ProxyURI.compile_relay, help='udp remote server uri (default: direct)')
    parser.add_argument('-b', dest='block', type=ProxyURI.compile_rule, help='block regex rules')
    parser.add_argument('-a', dest='alived', default=0, type=int, help='interval to check remote alive (default: no check)')
    parser.add_argument('-s', dest='salgorithm', default='fa', choices=('fa', 'rr', 'rc', 'lc'), help='scheduling algorithm (default: first_available)')
    parser.add_argument('-v', dest='v', action='count', help='print verbose output')
    parser.add_argument('--ssl', dest='sslfile', help='certfile[,keyfile] if server listen in ssl mode')
    parser.add_argument('--pac', help='http PAC path')
    parser.add_argument('--get', dest='gets', default=[], action='append', help='http custom {path,file}')
    parser.add_argument('--auth', dest='authtime', type=int, default=86400*30, help='re-auth time interval for same ip (default: 86400*30)')
    parser.add_argument('--sys', action='store_true', help='change system proxy setting (mac, windows)')
    parser.add_argument('--reuse', dest='ruport', action='store_true', help='set SO_REUSEPORT (Linux only)')
    parser.add_argument('--daemon', dest='daemon', action='store_true', help='run as a daemon (Linux only)')
    parser.add_argument('--test', help='test this url for all remote proxies and exit')
    parser.add_argument('--version', action='version', version=f'%(prog)s {__version__}')
    args = parser.parse_args()
    if args.test:
        asyncio.get_event_loop().run_until_complete(test_url(args.test, args.rserver))
        return
    if not args.listen and not args.ulisten:
        args.listen.append(ProxyURI.compile_relay('http+socks4+socks5://:8080/'))
    args.httpget = {}
    if args.pac:
        pactext = 'function FindProxyForURL(u,h){' + (f'var b=/^(:?{args.block.__self__.pattern})$/i;if(b.test(h))return "";' if args.block else '')
        for i, option in enumerate(args.rserver):
            pactext += (f'var m{i}=/^(:?{option.match.__self__.pattern})$/i;if(m{i}.test(h))' if option.match else '') + 'return "PROXY %(host)s";'
        args.httpget[args.pac] = pactext+'return "DIRECT";}'
        args.httpget[args.pac+'/all'] = 'function FindProxyForURL(u,h){return "PROXY %(host)s";}'
        args.httpget[args.pac+'/none'] = 'function FindProxyForURL(u,h){return "DIRECT";}'
    for gets in args.gets:
        path, filename = gets.split(',', 1)
        with open(filename, 'rb') as f:
            args.httpget[path] = f.read()
    if args.sslfile:
        sslfile = args.sslfile.split(',')
        for option in args.listen:
            if option.sslclient:
                option.sslclient.load_cert_chain(*sslfile)
                option.sslserver.load_cert_chain(*sslfile)
    elif any(map(lambda o: o.sslclient, args.listen)):
        print('You must specify --ssl to listen in ssl mode')
        return
    if args.daemon:
        try:
            __import__('daemon').DaemonContext().open()
        except ModuleNotFoundError:
            print("Missing library: pip3 install python-daemon")
            return
    # Try to use uvloop instead of the default event loop
    try:
        __import__('uvloop').install()
        print('Using uvloop')
    except ModuleNotFoundError:
        pass
    loop = asyncio.get_event_loop()
    if args.v:
        from . import verbose
        verbose.setup(loop, args)
    servers = []
    for option in args.listen:
        print('Serving on', option.bind, 'by', ",".join(i.name for i in option.protos) + ('(SSL)' if option.sslclient else ''), '({}{})'.format(option.cipher.name, ' '+','.join(i.name() for i in option.cipher.plugins) if option.cipher and option.cipher.plugins else '') if option.cipher else '')
        try:
            server = loop.run_until_complete(option.start_server(vars(args)))
            servers.append(server)
        except Exception as ex:
            print('Start server failed.\n\t==>', ex)
    for option in args.ulisten:
        print('Serving on UDP', option.bind, 'by', ",".join(i.name for i in option.protos), f'({option.cipher.name})' if option.cipher else '')
        try:
            server, protocol = loop.run_until_complete(option.start_udp_server(vars(args)))
            servers.append(server)
        except Exception as ex:
            print('Start server failed.\n\t==>', ex)
    for option in args.rserver:
        if option.backward:
            print('Serving on', option.bind, 'backward by', ",".join(i.name for i in option.protos) + ('(SSL)' if option.sslclient else ''), '({}{})'.format(option.cipher.name, ' '+','.join(i.name() for i in option.cipher.plugins) if option.cipher and option.cipher.plugins else '') if option.cipher else '')
            try:
                server = loop.run_until_complete(option.backward.client_run(vars(args)))
                servers.append(server)
            except Exception as ex:
                print('Start server failed.\n\t==>', ex)
    if servers:
        if args.sys:
            from . import sysproxy
            args.sys = sysproxy.setup(args)
        if args.alived > 0 and args.rserver:
            asyncio.ensure_future(check_server_alive(args.alived, args.rserver, args.verbose if args.v else DUMMY))
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            print('exit')
            if args.sys:
                args.sys.clear()
    # FIX: asyncio.Task.all_tasks() was removed in Python 3.9; the
    # module-level asyncio.all_tasks(loop) is the supported replacement
    # (passing the loop explicitly since no loop is running here).
    for task in asyncio.all_tasks(loop):
        task.cancel()
    for server in servers:
        server.close()
    for server in servers:
        if hasattr(server, 'wait_closed'):
            loop.run_until_complete(server.wait_closed())
    loop.run_until_complete(loop.shutdown_asyncgens())
    loop.close()

if __name__ == '__main__':
    main()
import argparse, time, re, asyncio, functools, base64, random, urllib.parse, socket
from . import proto
from .__doc__ import *

SOCKET_TIMEOUT = 300
PACKET_SIZE = 65536
UDP_LIMIT = 30
DUMMY = lambda s: s

# Convenience readers grafted onto StreamReader: bounded-size read plus
# timeout-guarded exact/until reads used throughout the handlers below.
asyncio.StreamReader.read_ = lambda self: self.read(PACKET_SIZE)
asyncio.StreamReader.read_n = lambda self, n: asyncio.wait_for(self.readexactly(n), timeout=SOCKET_TIMEOUT)
asyncio.StreamReader.read_until = lambda self, s: asyncio.wait_for(self.readuntil(s), timeout=SOCKET_TIMEOUT)

class AuthTable(object):
    """Remember, per source IP, when authentication last succeeded."""
    _auth = {}  # class-level: shared across all connections
    def __init__(self, remote_ip, authtime):
        self.remote_ip = remote_ip
        self.authtime = authtime
    def authed(self):
        """True while the last successful auth is younger than authtime."""
        return time.time() - self._auth.get(self.remote_ip, 0) <= self.authtime
    def set_authed(self):
        self._auth[self.remote_ip] = time.time()

async def prepare_ciphers(cipher, reader, writer, bind=None, server_side=True):
    """Initialize cipher plugins on a stream pair and return wrapped streams.

    Returns (None, None) when no cipher is configured.
    """
    if cipher:
        cipher.pdecrypt = cipher.pdecrypt2 = cipher.pencrypt = cipher.pencrypt2 = DUMMY
        for plugin in cipher.plugins:
            if server_side:
                await plugin.init_server_data(reader, writer, cipher, bind)
            else:
                await plugin.init_client_data(reader, writer, cipher)
            plugin.add_cipher(cipher)
        return cipher(reader, writer, cipher.pdecrypt, cipher.pdecrypt2, cipher.pencrypt, cipher.pencrypt2)
    else:
        return None, None

def schedule(rserver, salgorithm, host_name, port):
    """Pick a remote for (host_name, port) using the chosen algorithm.

    fa=first available, rr=round robin, rc=random choice, lc=least connections.
    Returns None when no alive remote matches.
    """
    filter_cond = lambda o: o.alive and (not o.match or o.match(host_name) or o.match(str(port)))
    if salgorithm == 'fa':
        return next(filter(filter_cond, rserver), None)
    elif salgorithm == 'rr':
        for i, roption in enumerate(rserver):
            if filter_cond(roption):
                rserver.append(rserver.pop(i))  # rotate the chosen one to the back
                return roption
    elif salgorithm == 'rc':
        filters = [i for i in rserver if filter_cond(i)]
        return random.choice(filters) if filters else None
    elif salgorithm == 'lc':
        return min(filter(filter_cond, rserver), default=None, key=lambda i: i.total)
    else:
        raise Exception('Unknown scheduling algorithm') #Unreachable

async def stream_handler(reader, writer, unix, lbind, protos, rserver, cipher, sslserver,
                         authtime=86400*30, block=None, salgorithm='fa', verbose=DUMMY,
                         modstat=lambda r,h:lambda i:DUMMY, **kwargs):
    """Serve one accepted TCP connection: parse the client protocol, pick a
    remote, negotiate it, then splice the two streams together."""
    try:
        reader, writer = proto.sslwrap(reader, writer, sslserver, True, None, verbose)
        if unix:
            remote_ip, server_ip, remote_text = 'local', None, 'unix_local'
        else:
            remote_ip, remote_port, *_ = writer.get_extra_info('peername')
            server_ip = writer.get_extra_info('sockname')[0]
            remote_text = f'{remote_ip}:{remote_port}'
        # Bind outgoing sockets to the accepting interface unless loopback.
        local_addr = None if server_ip in ('127.0.0.1', '::1', None) else (server_ip, 0)
        reader_cipher, _ = await prepare_ciphers(cipher, reader, writer, server_side=False)
        lproto, host_name, port, lbuf, rbuf = await proto.parse(protos, reader=reader, writer=writer,
                                                                authtable=AuthTable(remote_ip, authtime),
                                                                reader_cipher=reader_cipher,
                                                                sock=writer.get_extra_info('socket'), **kwargs)
        if host_name == 'echo':
            asyncio.ensure_future(lproto.channel(reader, writer, DUMMY, DUMMY))
        elif host_name == 'empty':
            asyncio.ensure_future(lproto.channel(reader, writer, None, DUMMY))
        elif block and block(host_name):
            raise Exception('BLOCK ' + host_name)
        else:
            roption = schedule(rserver, salgorithm, host_name, port) or ProxyURI.DIRECT
            verbose(f'{lproto.name} {remote_text}{roption.logtext(host_name, port)}')
            try:
                reader_remote, writer_remote = await roption.open_connection(host_name, port, local_addr, lbind)
            except asyncio.TimeoutError:
                raise Exception(f'Connection timeout {roption.bind}')
            try:
                reader_remote, writer_remote = await roption.prepare_connection(reader_remote, writer_remote, host_name, port)
                writer.write(lbuf)
                writer_remote.write(rbuf)
            except Exception:
                writer_remote.close()
                raise Exception('Unknown remote protocol')
            m = modstat(remote_ip, host_name)
            lchannel = lproto.http_channel if rbuf else lproto.channel
            asyncio.ensure_future(lproto.channel(reader_remote, writer, m(2+roption.direct), m(4+roption.direct)))
            asyncio.ensure_future(lchannel(reader, writer_remote, m(roption.direct), roption.connection_change))
    except Exception as ex:
        if not isinstance(ex, asyncio.TimeoutError) and not str(ex).startswith('Connection closed'):
            verbose(f'{str(ex) or "Unsupported protocol"} from {remote_ip}')
        try:
            writer.close()
        except Exception:
            pass

async def reuse_stream_handler(reader, writer, unix, lbind, protos, rserver, urserver, block,
                               cipher, salgorithm, verbose=DUMMY,
                               modstat=lambda r,h:lambda i:DUMMY, **kwargs):
    """Serve one 'pack' (multiplexed) connection: hand TCP/UDP sub-requests
    to the protocol handler via the nested callbacks below."""
    try:
        if unix:
            remote_ip, server_ip, remote_text = 'local', None, 'unix_local'
        else:
            remote_ip, remote_port, *_ = writer.get_extra_info('peername')
            server_ip = writer.get_extra_info('sockname')[0]
            remote_text = f'{remote_ip}:{remote_port}'
        local_addr = None if server_ip in ('127.0.0.1', '::1', None) else (server_ip, 0)
        reader_cipher, _ = await prepare_ciphers(cipher, reader, writer, server_side=False)
        lproto = protos[0]
    except Exception as ex:
        verbose(f'{str(ex) or "Unsupported protocol"} from {remote_ip}')
    async def tcp_handler(reader, writer, host_name, port):
        # One multiplexed TCP sub-stream toward (host_name, port).
        try:
            if block and block(host_name):
                raise Exception('BLOCK ' + host_name)
            roption = schedule(rserver, salgorithm, host_name, port) or ProxyURI.DIRECT
            verbose(f'{lproto.name} {remote_text}{roption.logtext(host_name, port)}')
            try:
                reader_remote, writer_remote = await roption.open_connection(host_name, port, local_addr, lbind)
            except asyncio.TimeoutError:
                raise Exception(f'Connection timeout {roption.bind}')
            try:
                reader_remote, writer_remote = await roption.prepare_connection(reader_remote, writer_remote, host_name, port)
            except Exception:
                writer_remote.close()
                raise Exception('Unknown remote protocol')
            m = modstat(remote_ip, host_name)
            asyncio.ensure_future(lproto.channel(reader_remote, writer, m(2+roption.direct), m(4+roption.direct)))
            asyncio.ensure_future(lproto.channel(reader, writer_remote, m(roption.direct), roption.connection_change))
        except Exception as ex:
            if not isinstance(ex, asyncio.TimeoutError) and not str(ex).startswith('Connection closed'):
                verbose(f'{str(ex) or "Unsupported protocol"} from {remote_ip}')
            try:
                writer.close()
            except Exception:
                pass
    async def udp_handler(sendto, data, host_name, port, sid):
        # One multiplexed UDP payload toward (host_name, port).
        try:
            if block and block(host_name):
                raise Exception('BLOCK ' + host_name)
            roption = schedule(urserver, salgorithm, host_name, port) or ProxyURI.DIRECT
            verbose(f'UDP {lproto.name} {remote_text}{roption.logtext(host_name, port)}')
            data = roption.prepare_udp_connection(host_name, port, data)
            await roption.open_udp_connection(host_name, port, data, sid, sendto)
        except Exception as ex:
            if not str(ex).startswith('Connection closed'):
                verbose(f'{str(ex) or "Unsupported protocol"} from {remote_ip}')
    lproto.get_handler(reader, writer, verbose, tcp_handler, udp_handler)

async def datagram_handler(writer, data, addr, protos, urserver, block, cipher, salgorithm, verbose=DUMMY, **kwargs):
    """Handle one datagram on a UDP listener and relay replies to its sender."""
    try:
        remote_ip, remote_port, *_ = addr
        remote_text = f'{remote_ip}:{remote_port}'
        data = cipher.datagram.decrypt(data) if cipher else data
        lproto, host_name, port, data = proto.udp_parse(protos, data, sock=writer.get_extra_info('socket'), **kwargs)
        if host_name == 'echo':
            writer.sendto(data, addr)
        elif host_name == 'empty':
            pass
        elif block and block(host_name):
            raise Exception('BLOCK ' + host_name)
        else:
            roption = schedule(urserver, salgorithm, host_name, port) or ProxyURI.DIRECT
            verbose(f'UDP {lproto.name} {remote_text}{roption.logtext(host_name, port)}')
            data = roption.prepare_udp_connection(host_name, port, data)
            def reply(rdata):
                rdata = lproto.udp_client2(host_name, port, rdata)
                writer.sendto(cipher.datagram.encrypt(rdata) if cipher else rdata, addr)
            await roption.open_udp_connection(host_name, port, data, addr, reply)
    except Exception as ex:
        if not str(ex).startswith('Connection closed'):
            verbose(f'{str(ex) or "Unsupported protocol"} from {remote_ip}')

async def check_server_alive(interval, rserver, verbose):
    """Probe every non-direct remote each `interval` seconds, updating `alive`."""
    while True:
        await asyncio.sleep(interval)
        for remote in rserver:
            if remote.direct:
                continue
            try:
                _, writer = await remote.open_connection(None, None, None, None, timeout=3)
            except asyncio.CancelledError as ex:
                return
            except Exception as ex:
                if remote.alive:
                    verbose(f'{remote.rproto.name} {remote.bind} -> OFFLINE')
                    remote.alive = False
                continue
            if not remote.alive:
                verbose(f'{remote.rproto.name} {remote.bind} -> ONLINE')
                remote.alive = True
            try:
                if remote.backward:
                    writer.write(b'\x00')  # NUL keep-alive releases the slot
                writer.close()
            except Exception:
                pass

class BackwardConnection(object):
    """Reverse-connection pool: the server dials out, the client parks streams."""
    def __init__(self, uri, count):
        self.uri = uri
        self.count = count
        self.closed = False
        self.conn = asyncio.Queue()
    async def open_connection(self):
        """Return the first parked stream whose transport is still open."""
        while True:
            reader, writer = await self.conn.get()
            if not writer.transport.is_closing():
                return reader, writer
    def close(self):
        self.closed = True
        try:
            self.writer.close()
        except Exception:
            pass
    async def start_server(self, handler):
        for _ in range(self.count):
            asyncio.ensure_future(self.server_run(handler))
        return self
    async def server_run(self, handler):
        """Maintain one dialed-out slot; exponential backoff on failures."""
        errwait = 0
        while not self.closed:
            if self.uri.unix:
                wait = asyncio.open_unix_connection(path=self.uri.bind)
            else:
                wait = asyncio.open_connection(host=self.uri.host_name, port=self.uri.port,
                                               local_addr=(self.uri.lbind, 0) if self.uri.lbind else None)
            try:
                reader, writer = await asyncio.wait_for(wait, timeout=SOCKET_TIMEOUT)
                writer.write(self.uri.auth)
                self.writer = writer
                try:
                    data = await reader.read_n(1)
                except asyncio.TimeoutError:
                    data = None
                if data and data[0] != 0:
                    # Real payload: push the byte back and serve the stream.
                    reader._buffer[0:0] = data
                    asyncio.ensure_future(handler(reader, writer))
                else:
                    writer.close()  # NUL / timeout: keep-alive only
                errwait = 0
            except Exception as ex:
                try:
                    writer.close()
                except Exception:
                    pass
                if not self.closed:
                    await asyncio.sleep(errwait)
                    errwait = min(errwait*1.3 + 0.1, 30)
    def client_run(self, args):
        """Accept reverse connections, verify auth, park them for later use."""
        async def handler(reader, writer):
            if self.uri.auth:
                try:
                    assert self.uri.auth == (await reader.read_n(len(self.uri.auth)))
                except Exception:
                    return
            await self.conn.put((reader, writer))
        if self.uri.unix:
            return asyncio.start_unix_server(handler, path=self.uri.bind)
        else:
            return asyncio.start_server(handler, host=self.uri.host_name, port=self.uri.port,
                                        reuse_port=args.get('ruport'))
class ProxyURI(object):
    """A parsed proxy endpoint (local listener or remote hop).

    Instances are built by :meth:`compile` from URI strings such as
    ``ss://aes-256-cfb:pass@host:port``; ``relay`` chains hops compiled by
    :meth:`compile_relay`. ``ProxyURI.DIRECT`` is the no-proxy sentinel.
    """
    def __init__(self, **kw):
        self.__dict__.update(kw)   # all attributes come from compile()'s kwargs
        self.total = 0             # live TCP connection count (connection_change)
        self.udpmap = {}           # client addr -> active DatagramProtocol
        self.handler = None        # shared 'pack' protocol handler (reuse mode)
        self.streams = None        # shared (reader, writer) future (reuse/ssh)
        if self.backward:
            self.backward = BackwardConnection(self, self.backward)
    def logtext(self, host, port):
        """Human-readable routing suffix for verbose logs, following the relay chain."""
        if self.direct:
            return f' -> {host}:{port}'
        elif self.tunnel:
            return f' ->{(" ssl" if self.sslclient else "")} {self.bind}'
        else:
            return f' -> {self.rproto.name+("+ssl" if self.sslclient else "")} {self.bind}' + self.relay.logtext(host, port)
    def connection_change(self, delta):
        # Bookkeeping hook for scheduling algorithms such as least-connection.
        self.total += delta
    async def open_udp_connection(self, host, port, data, addr, reply):
        """Send `data` for client `addr`, creating (or reusing) a datagram
        endpoint per client and evicting the stalest one past UDP_LIMIT."""
        class Protocol(asyncio.DatagramProtocol):
            # `prot` plays the role of `self` so the outer `self` (ProxyURI)
            # stays accessible inside the protocol callbacks.
            def __init__(prot, data):
                self.udpmap[addr] = prot
                prot.databuf = [data]     # queued until the transport is ready
                prot.transport = None
                prot.update = 0           # last-activity stamp for LRU eviction
            def connection_made(prot, transport):
                prot.transport = transport
                for data in prot.databuf:
                    transport.sendto(data)
                prot.databuf.clear()
                prot.update = time.perf_counter()
            def new_data_arrived(prot, data):
                if prot.transport:
                    prot.transport.sendto(data)
                else:
                    prot.databuf.append(data)
                prot.update = time.perf_counter()
            def datagram_received(prot, data, addr):
                # Unwrap remote-side cipher/protocol before replying to the client.
                data = self.cipher.datagram.decrypt(data) if self.cipher else data
                data = self.rproto.udp_client(data) if not self.direct else data
                reply(data)
                prot.update = time.perf_counter()
            def connection_lost(prot, exc):
                self.udpmap.pop(addr, None)
        if addr in self.udpmap:
            self.udpmap[addr].new_data_arrived(data)
        else:
            if self.direct and host == 'tunnel':
                raise Exception('Unknown tunnel endpoint')
            self.connection_change(1)
            if len(self.udpmap) > UDP_LIMIT:
                # Evict the least-recently-active client mapping.
                min_addr = min(self.udpmap, key=lambda x: self.udpmap[x].update)
                prot = self.udpmap.pop(min_addr)
                if prot.transport:
                    prot.transport.close()
            prot = Protocol(data)
            remote_addr = (host, port) if self.direct else (self.host_name, self.port)
            await asyncio.get_event_loop().create_datagram_endpoint(lambda: prot, remote_addr=remote_addr)
    def prepare_udp_connection(self, host, port, data):
        """Recursively wrap `data` in each hop's UDP envelope (innermost hop
        first), then this hop's cipher. DIRECT returns data unchanged."""
        if not self.direct:
            data = self.relay.prepare_udp_connection(host, port, data)
            whost, wport = (host, port) if self.relay.direct else (self.relay.host_name, self.relay.port)
            data = self.rproto.udp_connect(rauth=self.auth, host_name=whost, port=wport, data=data)
            if self.cipher:
                data = self.cipher.datagram.encrypt(data)
        return data
    def start_udp_server(self, args):
        """Bind a local UDP listener that funnels datagrams into datagram_handler."""
        class Protocol(asyncio.DatagramProtocol):
            def connection_made(prot, transport):
                prot.transport = transport
            def datagram_received(prot, data, addr):
                asyncio.ensure_future(datagram_handler(prot.transport, data, addr, **vars(self), **args))
        return asyncio.get_event_loop().create_datagram_endpoint(Protocol, local_addr=(self.host_name, self.port))
    async def open_connection(self, host, port, local_addr, lbind, timeout=SOCKET_TIMEOUT):
        """Open the TCP (or unix/backward/ssh) transport for this hop.

        In reuse/ssh mode a single connection is shared via the `streams`
        future: the first caller creates it, later callers await the result.
        """
        if self.reuse or self.ssh:
            if self.streams is None or self.streams.done() and (self.reuse and not self.handler):
                self.streams = asyncio.get_event_loop().create_future()
            else:
                if not self.streams.done():
                    await self.streams
                return self.streams.result()
        try:
            # Explicit lbind on this URI wins; 'in' means inherit caller's local_addr.
            local_addr = local_addr if self.lbind == 'in' else (self.lbind, 0) if self.lbind else \
                         local_addr if lbind == 'in' else (lbind, 0) if lbind else None
            family = 0 if local_addr is None else socket.AF_INET6 if ':' in local_addr[0] else socket.AF_INET
            if self.direct:
                if host == 'tunnel':
                    raise Exception('Unknown tunnel endpoint')
                wait = asyncio.open_connection(host=host, port=port, local_addr=local_addr, family=family)
            elif self.ssh:
                try:
                    import asyncssh
                    # Graft pproxy's StreamReader helpers onto asyncssh readers.
                    for s in ('read_', 'read_n', 'read_until'):
                        setattr(asyncssh.SSHReader, s, getattr(asyncio.StreamReader, s))
                except Exception:
                    raise Exception('Missing library: "pip3 install asyncssh"')
                username, password = self.auth.decode().split(':', 1)
                if password.startswith(':'):
                    # "user::keyfile" form selects key auth instead of password.
                    client_keys = [password[1:]]
                    password = None
                else:
                    client_keys = None
                conn = await asyncssh.connect(host=self.host_name, port=self.port, local_addr=local_addr, family=family, x509_trusted_certs=None, known_hosts=None, username=username, password=password, client_keys=client_keys, keepalive_interval=60)
                if not self.streams.done():
                    self.streams.set_result((conn, None))
                return conn, None
            elif self.backward:
                wait = self.backward.open_connection()
            elif self.unix:
                wait = asyncio.open_unix_connection(path=self.bind)
            else:
                wait = asyncio.open_connection(host=self.host_name, port=self.port, local_addr=local_addr, family=family)
            reader, writer = await asyncio.wait_for(wait, timeout=timeout)
        except Exception as ex:
            if self.reuse:
                # Propagate the failure to any waiters, then allow a retry.
                self.streams.set_exception(ex)
                self.streams = None
            raise
        return reader, writer
    def prepare_connection(self, reader_remote, writer_remote, host, port):
        """Entry point after open_connection: lazily create the shared 'pack'
        handler in reuse mode, then negotiate ciphers/headers."""
        if self.reuse and not self.handler:
            self.handler = self.rproto.get_handler(reader_remote, writer_remote, DUMMY)
        return self.prepare_ciphers_and_headers(reader_remote, writer_remote, host, port, self.handler)
    async def prepare_ciphers_and_headers(self, reader_remote, writer_remote, host, port, handler):
        """Wrap the stream in TLS/cipher and send this hop's protocol handshake,
        then recurse into the relay chain. DIRECT is a pass-through."""
        if not self.direct:
            reader_remote, writer_remote = proto.sslwrap(reader_remote, writer_remote, self.sslclient, False, self.host_name)
            if not handler or not handler.ready:
                _, writer_cipher_r = await prepare_ciphers(self.cipher, reader_remote, writer_remote, self.bind)
            else:
                writer_cipher_r = None
            whost, wport = (host, port) if self.relay.direct else (self.relay.host_name, self.relay.port)
            if self.rproto.reuse():
                if not self.streams.done():
                    self.streams.set_result((reader_remote, writer_remote))
                reader_remote, writer_remote = handler.connect(whost, wport)
            elif self.ssh:
                # reader_remote is the asyncssh connection object in ssh mode.
                reader_remote, writer_remote = await reader_remote.open_connection(whost, wport)
            else:
                await self.rproto.connect(reader_remote=reader_remote, writer_remote=writer_remote, rauth=self.auth, host_name=whost, port=wport, writer_cipher_r=writer_cipher_r, myhost=self.host_name, sock=writer_remote.get_extra_info('socket'))
            return await self.relay.prepare_ciphers_and_headers(reader_remote, writer_remote, host, port, handler)
        return reader_remote, writer_remote
    def start_server(self, args):
        """Start the local TCP listener (normal, unix-socket or backward mode)."""
        handler = functools.partial(reuse_stream_handler if self.reuse else stream_handler, **vars(self), **args)
        if self.backward:
            return self.backward.start_server(handler)
        elif self.unix:
            return asyncio.start_unix_server(handler, path=self.bind)
        else:
            return asyncio.start_server(handler, host=self.host_name, port=self.port, reuse_port=args.get('ruport'))
    async def tcp_connect(self, host, port, local_addr=None, lbind=None):
        """Convenience: open + handshake in one call, closing on handshake failure."""
        reader, writer = await self.open_connection(host, port, local_addr, lbind)
        try:
            reader, writer = await self.prepare_connection(reader, writer, host, port)
        except Exception:
            writer.close()
            raise
        return reader, writer
    async def udp_sendto(self, host, port, data, answer_cb, local_addr=None):
        """Convenience UDP sender; a random 32-bit key stands in for the client
        address when none is given."""
        if local_addr is None:
            local_addr = random.randrange(2**32)
        data = self.prepare_udp_connection(host, port, data)
        await self.open_udp_connection(host, port, data, local_addr, answer_cb)
    @classmethod
    def compile_rule(cls, filename):
        """Build a hostname matcher: '{regex}' inline, otherwise a file of
        patterns (one per line, '#' comments) OR-joined into one regex."""
        if filename.startswith("{") and filename.endswith("}"):
            return re.compile(filename[1:-1]).match
        with open(filename) as f:
            # NOTE(review): '(:?' is presumably meant to be the non-capturing
            # '(?:' — as written it also allows a leading ':'; matches upstream,
            # confirm before changing.
            return re.compile('(:?'+''.join('|'.join(i.strip() for i in f if i.strip() and not i.startswith('#')))+')$').match
    @classmethod
    def compile_relay(cls, uri):
        """Compile 'a__b__c' into a chain where each hop relays to the next."""
        tail = cls.DIRECT
        for urip in reversed(uri.split('__')):
            tail = cls.compile(urip, tail)
        return tail
    @classmethod
    def compile(cls, uri, relay=None):
        """Parse one proxy URI (scheme 'proto+proto+flags://cipher@host:port/path,plugins?rules#auth')
        into a ProxyURI; raises argparse.ArgumentTypeError on bad input so it
        can be used directly as an argparse `type`."""
        scheme, _, uri = uri.partition('://')
        url = urllib.parse.urlparse('s://'+uri)  # dummy scheme so urlparse splits the rest
        rawprotos = scheme.split('+')
        err_str, protos = proto.get_protos(rawprotos)
        if err_str:
            raise argparse.ArgumentTypeError(err_str)
        if 'ssl' in rawprotos or 'secure' in rawprotos:
            import ssl
            sslserver = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            sslclient = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
            if 'ssl' in rawprotos:
                # plain 'ssl' skips verification; 'secure' keeps full checking
                sslclient.check_hostname = False
                sslclient.verify_mode = ssl.CERT_NONE
        else:
            sslserver = sslclient = None
        protonames = [i.name for i in protos]
        if 'pack' in protonames and relay and relay != cls.DIRECT:
            raise argparse.ArgumentTypeError('pack protocol cannot relay to other proxy')
        urlpath, _, plugins = url.path.partition(',')
        urlpath, _, lbind = urlpath.partition('@')
        plugins = plugins.split(',') if plugins else None
        cipher, _, loc = url.netloc.rpartition('@')
        if cipher:
            from .cipher import get_cipher
            if ':' not in cipher:
                # allow base64-encoded "cipher:key" userinfo
                try:
                    cipher = base64.b64decode(cipher).decode()
                except Exception:
                    pass
            if ':' not in cipher:
                raise argparse.ArgumentTypeError('userinfo must be "cipher:key"')
            err_str, cipher = get_cipher(cipher)
            if err_str:
                raise argparse.ArgumentTypeError(err_str)
            if plugins:
                from .plugin import get_plugin
                for name in plugins:
                    if not name:
                        continue
                    err_str, plugin = get_plugin(name)
                    if err_str:
                        raise argparse.ArgumentTypeError(err_str)
                    cipher.plugins.append(plugin)
        match = cls.compile_rule(url.query) if url.query else None
        if loc:
            host_name, _, port = loc.partition(':')
            port = int(port) if port else (22 if 'ssh' in rawprotos else 8080)
        else:
            host_name = port = None  # no host:port => unix socket path in urlpath
        return ProxyURI(protos=protos, rproto=protos[0], cipher=cipher, auth=url.fragment.encode(), \
                        match=match, bind=loc or urlpath, host_name=host_name, port=port, \
                        unix=not loc, lbind=lbind, sslclient=sslclient, sslserver=sslserver, \
                        alive=True, direct='direct' in protonames, tunnel='tunnel' in protonames, \
                        reuse='pack' in protonames or relay and relay.reuse, backward=rawprotos.count('in'), \
                        ssh='ssh' in rawprotos, relay=relay)
# Sentinel for "no proxy": every attribute the code paths test is falsy/neutral.
ProxyURI.DIRECT = ProxyURI(direct=True, tunnel=False, reuse=False, relay=None, alive=True, match=None, cipher=None, backward=None, ssh=None, lbind=None)

async def test_url(url, rserver):
    """Fetch `url` once through every configured remote proxy and print the
    response — the implementation behind the --test CLI flag."""
    url = urllib.parse.urlparse(url)
    assert url.scheme in ('http', 'https'), f'Unknown scheme {url.scheme}'
    host_name, _, port = url.netloc.partition(':')
    port = int(port) if port else 80 if url.scheme == 'http' else 443
    initbuf = f'GET {url.path or "/"} HTTP/1.1\r\nHost: {host_name}\r\nUser-Agent: pproxy-{__version__}\r\nAccept: */*\r\nConnection: close\r\n\r\n'.encode()
    for roption in rserver:
        print(f'============ {roption.bind} ============')
        try:
            reader, writer = await roption.open_connection(host_name, port, None, None)
        except asyncio.TimeoutError:
            raise Exception(f'Connection timeout {rserver}')
        try:
            reader, writer = await roption.prepare_connection(reader, writer, host_name, port)
        except Exception:
            writer.close()
            raise Exception('Unknown remote protocol')
        if url.scheme == 'https':
            import ssl
            sslclient = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
            sslclient.check_hostname = False
            sslclient.verify_mode = ssl.CERT_NONE
            reader, writer = proto.sslwrap(reader, writer, sslclient, False, host_name)
        writer.write(initbuf)
        headers = await reader.read_until(b'\r\n\r\n')
        print(headers.decode()[:-4])  # strip the trailing blank line
        print(f'--------------------------------')
        body = bytearray()
        while 1:
            s = await reader.read_()
            if not s:
                break
            body.extend(s)
        print(body.decode('utf8', 'ignore'))
        print(f'============ success ============')

def main():
    """CLI entry point: parse arguments, start all listeners/backward clients,
    and run the event loop until interrupted."""
    parser = argparse.ArgumentParser(description=__description__+'\nSupported protocols: http,socks4,socks5,shadowsocks,shadowsocksr,redirect,pf,tunnel', epilog=f'Online help: <{__url__}>')
    parser.add_argument('-l', dest='listen', default=[], action='append', type=ProxyURI.compile, help='tcp server uri (default: http+socks4+socks5://:8080/)')
    parser.add_argument('-r', dest='rserver', default=[], action='append', type=ProxyURI.compile_relay, help='tcp remote server uri (default: direct)')
    parser.add_argument('-ul', dest='ulisten', default=[], action='append', type=ProxyURI.compile, help='udp server setting uri (default: none)')
    parser.add_argument('-ur', dest='urserver', default=[], action='append', type=ProxyURI.compile_relay, help='udp remote server uri (default: direct)')
    parser.add_argument('-b', dest='block', type=ProxyURI.compile_rule, help='block regex rules')
    parser.add_argument('-a', dest='alived', default=0, type=int, help='interval to check remote alive (default: no check)')
    parser.add_argument('-s', dest='salgorithm', default='fa', choices=('fa', 'rr', 'rc', 'lc'), help='scheduling algorithm (default: first_available)')
    parser.add_argument('-v', dest='v', action='count', help='print verbose output')
    parser.add_argument('--ssl', dest='sslfile', help='certfile[,keyfile] if server listen in ssl mode')
    parser.add_argument('--pac', help='http PAC path')
    parser.add_argument('--get', dest='gets', default=[], action='append', help='http custom {path,file}')
    parser.add_argument('--auth', dest='authtime', type=int, default=86400*30, help='re-auth time interval for same ip (default: 86400*30)')
    parser.add_argument('--sys', action='store_true', help='change system proxy setting (mac, windows)')
    parser.add_argument('--reuse', dest='ruport', action='store_true', help='set SO_REUSEPORT (Linux only)')
    parser.add_argument('--daemon', dest='daemon', action='store_true', help='run as a daemon (Linux only)')
    parser.add_argument('--test', help='test this url for all remote proxies and exit')
    parser.add_argument('--version', action='version', version=f'%(prog)s {__version__}')
    args = parser.parse_args()
    if args.test:
        asyncio.get_event_loop().run_until_complete(test_url(args.test, args.rserver))
        return
    if not args.listen and not args.ulisten:
        args.listen.append(ProxyURI.compile_relay('http+socks4+socks5://:8080/'))
    args.httpget = {}
    if args.pac:
        # Build the JavaScript PAC body from the block list and per-remote match rules.
        pactext = 'function FindProxyForURL(u,h){' + (f'var b=/^(:?{args.block.__self__.pattern})$/i;if(b.test(h))return "";' if args.block else '')
        for i, option in enumerate(args.rserver):
            pactext += (f'var m{i}=/^(:?{option.match.__self__.pattern})$/i;if(m{i}.test(h))' if option.match else '') + 'return "PROXY %(host)s";'
        args.httpget[args.pac] = pactext+'return "DIRECT";}'
        args.httpget[args.pac+'/all'] = 'function FindProxyForURL(u,h){return "PROXY %(host)s";}'
        args.httpget[args.pac+'/none'] = 'function FindProxyForURL(u,h){return "DIRECT";}'
    for gets in args.gets:
        path, filename = gets.split(',', 1)
        with open(filename, 'rb') as f:
            args.httpget[path] = f.read()
    if args.sslfile:
        sslfile = args.sslfile.split(',')
        for option in args.listen:
            if option.sslclient:
                option.sslclient.load_cert_chain(*sslfile)
                option.sslserver.load_cert_chain(*sslfile)
    elif any(map(lambda o: o.sslclient, args.listen)):
        print('You must specify --ssl to listen in ssl mode')
        return
    if args.daemon:
        try:
            __import__('daemon').DaemonContext().open()
        except ModuleNotFoundError:
            print("Missing library: pip3 install python-daemon")
            return
    # Try to use uvloop instead of the default event loop
    try:
        __import__('uvloop').install()
        print('Using uvloop')
    except ModuleNotFoundError:
        pass
    loop = asyncio.get_event_loop()
    if args.v:
        from . import verbose
        verbose.setup(loop, args)
    servers = []
    for option in args.listen:
        print('Serving on', option.bind, 'by', ",".join(i.name for i in option.protos) + ('(SSL)' if option.sslclient else ''), '({}{})'.format(option.cipher.name, ' '+','.join(i.name() for i in option.cipher.plugins) if option.cipher and option.cipher.plugins else '') if option.cipher else '')
        try:
            server = loop.run_until_complete(option.start_server(vars(args)))
            servers.append(server)
        except Exception as ex:
            print('Start server failed.\n\t==>', ex)
    for option in args.ulisten:
        print('Serving on UDP', option.bind, 'by', ",".join(i.name for i in option.protos), f'({option.cipher.name})' if option.cipher else '')
        try:
            server, protocol = loop.run_until_complete(option.start_udp_server(vars(args)))
            servers.append(server)
        except Exception as ex:
            print('Start server failed.\n\t==>', ex)
    for option in args.rserver:
        if option.backward:
            print('Serving on', option.bind, 'backward by', ",".join(i.name for i in option.protos) + ('(SSL)' if option.sslclient else ''), '({}{})'.format(option.cipher.name, ' '+','.join(i.name() for i in option.cipher.plugins) if option.cipher and option.cipher.plugins else '') if option.cipher else '')
            try:
                server = loop.run_until_complete(option.backward.client_run(vars(args)))
                servers.append(server)
            except Exception as ex:
                print('Start server failed.\n\t==>', ex)
    if servers:
        if args.sys:
            from . import sysproxy
            args.sys = sysproxy.setup(args)
        if args.alived > 0 and args.rserver:
            # NOTE(review): args.verbose is presumably attached by verbose.setup()
            # when -v is given — confirm; without -v the DUMMY branch is taken.
            asyncio.ensure_future(check_server_alive(args.alived, args.rserver, args.verbose if args.v else DUMMY))
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            print('exit')
        if args.sys:
            args.sys.clear()  # restore system proxy settings on shutdown
    # NOTE(review): asyncio.Task.all_tasks() was removed in Python 3.9 — this
    # module presumably targets older interpreters; confirm supported versions.
    for task in asyncio.Task.all_tasks():
        task.cancel()
    for server in servers:
        server.close()
    for server in servers:
        if hasattr(server, 'wait_closed'):
            loop.run_until_complete(server.wait_closed())
    loop.run_until_complete(loop.shutdown_asyncgens())
    loop.close()

if __name__ == '__main__':
    main()
import argparse
import logging
import math
import os
import random
import time
from copy import deepcopy
from pathlib import Path
from threading import Thread

import numpy as np
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import torch
import numpy as np
import random

import predict  # import predict.py to get mAP after each epoch
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
    fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
    check_requirements, print_mutation, set_logging, one_cycle, colorstr
from utils.google_utils import attempt_download
from utils.loss import ComputeLoss
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel
from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume
import json
from PIL import Image
import os
import shutil
from os import path
import sys
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from utils.general import xyxy2xywh

logger = logging.getLogger(__name__)


def train(hyp, opt, device, tb_writer=None):
    """Train a YOLO model.

    Args:
        hyp: hyperparameter dict (lr0, lrf, momentum, weight_decay, warmup_*,
            box/cls/obj gains, anchor_t, label_smoothing, ...).
        opt: parsed argparse namespace (weights, cfg, data, epochs, batch_size,
            total_batch_size, global_rank/local_rank, save_dir, fine_tune,
            save_period, epoch_parts, ...).
        device: torch.device to train on.
        tb_writer: optional TensorBoard SummaryWriter (rank 0 only).

    Returns:
        The last `results` tuple from predict.test
        (P, R, mAP@.5, mAP@.75, mAP@.5:.95, val box/obj/cls losses).
    """
    logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
    save_dir, epochs, batch_size, total_batch_size, weights, rank = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank

    # Directories
    wdir = save_dir / 'weights'
    wdir.mkdir(parents=True, exist_ok=True)  # make dir
    last = wdir / 'last.pt'
    best = wdir / 'best.pt'
    results_file = save_dir / 'results.txt'

    # Save run settings
    with open(save_dir / 'hyp.yaml', 'w') as f:
        yaml.safe_dump(hyp, f, sort_keys=False)
    with open(save_dir / 'opt.yaml', 'w') as f:
        yaml.safe_dump(vars(opt), f, sort_keys=False)

    # Configure
    # plots = not opt.evolve  # create plots
    plots = True  # create plots
    cuda = device.type != 'cpu'
    init_seeds(1 + rank)  # per-rank seed so DDP workers differ deterministically
    with open(opt.data) as f:
        data_dict = yaml.safe_load(f)  # data dict

    # Logging- Doing this before checking the dataset. Might update data_dict
    loggers = {'wandb': None}  # loggers dict
    if rank in [-1, 0]:
        opt.hyp = hyp  # add hyperparameters
        run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
        wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict)
        loggers['wandb'] = wandb_logger.wandb
        data_dict = wandb_logger.data_dict
        if wandb_logger.wandb:
            weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp  # WandbLogger might update weights, epochs if resuming

    nc = 1 if opt.single_cls else int(data_dict['nc'])  # number of classes
    names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
    assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check
    is_coco = opt.data.endswith('coco.yaml') and nc == 80  # COCO dataset

    # Model
    pretrained = weights.endswith('.pt')
    if pretrained:
        # with torch_distributed_zero_first(rank):
        #     weights = attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location=device)  # load checkpoint
        model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
        exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else []  # exclude keys
        state_dict = ckpt['model'].float().state_dict()  # to FP32
        state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(state_dict, strict=False)  # load
        logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
    else:
        model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
    with torch_distributed_zero_first(rank):
        check_dataset(data_dict)  # check
    train_path = data_dict['train']
    test_path = data_dict['val']

    # Freeze
    # BUGFIX: the original list contained '6' '7' (no comma) — Python's implicit
    # string concatenation turned it into '67', so layers 6 and 7 were never
    # frozen in fine-tune mode and 'model.67.' matched nothing.
    freeze = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11']  # parameter names to freeze (full or partial)
    freeze = ['model.' + number + '.' for number in freeze]
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        if any(x in k for x in freeze) and opt.fine_tune is True:
            print('freezing %s' % k)
            v.requires_grad = False

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / total_batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= total_batch_size * accumulate / nbs  # scale weight_decay
    # BUGFIX: inner quotes changed to single quotes — nesting the same quote
    # type inside an f-string is a SyntaxError before Python 3.12 (PEP 701).
    logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")
    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in model.named_modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
            pg2.append(v.bias)  # biases
        if isinstance(v, nn.BatchNorm2d):
            pg0.append(v.weight)  # no decay
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
            pg1.append(v.weight)  # apply decay
    if opt.adam:
        optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2

    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
    if opt.linear_lr:
        lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear
    else:
        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # plot_lr_scheduler(optimizer, scheduler, epochs)

    # EMA
    ema = ModelEMA(model) if rank in [-1, 0] else None

    # Resume
    start_epoch, best_fitness = 0, 0.0
    if pretrained:
        # Optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']
        # EMA
        if ema and ckpt.get('ema'):
            ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
            ema.updates = ckpt['updates']
        # Results
        if ckpt.get('training_results') is not None:
            results_file.write_text(ckpt['training_results'])  # write results.txt
        # Epochs
        start_epoch = ckpt['epoch'] + 1
        if opt.resume:
            assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
        if epochs < start_epoch:
            logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.'
                        % (weights, ckpt['epoch'], epochs))
            epochs += ckpt['epoch']  # finetune additional epochs
        del ckpt, state_dict

    # Image sizes
    gs = max(int(model.stride.max()), 32)  # grid size (max stride)
    nl = model.model[-1].nl  # number of detection layers (used for scaling hyp['obj'])
    imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size]  # verify imgsz are gs-multiples

    # DP mode
    if cuda and rank == -1 and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and rank != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        logger.info('Using SyncBatchNorm()')

    # Trainloader
    dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
                                            hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
                                            world_size=opt.world_size, workers=opt.workers,
                                            image_weights=opt.image_weights, quad=opt.quad,
                                            prefix=colorstr('train: '), task='train', epoch_parts=opt.epoch_parts)
    mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
    nb = len(dataloader)  # number of batches
    assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)

    # Process 0
    if rank in [-1, 0]:
        testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt,  # testloader
                                       hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1,
                                       world_size=opt.world_size, workers=opt.workers,
                                       pad=0.5, prefix=colorstr('val: '))[0]
        if not opt.resume:
            labels = np.concatenate(dataset.labels, 0)
            c = torch.tensor(labels[:, 0])  # classes
            # cf = torch.bincount(c.long(), minlength=nc) + 1.  # frequency
            # model._initialize_biases(cf.to(device))
            if plots:
                plot_labels(labels, names, save_dir, loggers)
                if tb_writer:
                    tb_writer.add_histogram('classes', c, 0)
            # Anchors
            if not opt.noautoanchor:
                check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
            model.half().float()  # pre-reduce anchor precision

    # DDP mode
    if cuda and rank != -1:
        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank,
                    # nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698
                    find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules()))

    # Model parameters
    hyp['box'] *= 3. / nl  # scale to layers
    hyp['cls'] *= nc / 80. * 3. / nl  # scale to classes and layers
    hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl  # scale to image size and layers
    hyp['label_smoothing'] = opt.label_smoothing
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.gr = 1.0  # iou loss ratio (obj_loss = 1.0 or iou)
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
    model.names = names

    # Start training
    t0 = time.time()
    nw = max(round(hyp['warmup_epochs'] * nb), 1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = amp.GradScaler(enabled=cuda)
    compute_loss = ComputeLoss(model)  # init loss class
    logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n'
                f'Using {dataloader.num_workers} dataloader workers\n'
                f'Logging results to {save_dir}\n'
                f'Starting training for {epochs} epochs...')
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        model.train()

        # Update image weights (optional)
        if opt.image_weights:
            # Generate indices
            if rank in [-1, 0]:
                cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
                iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
                dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx
            # Broadcast if DDP
            if rank != -1:
                indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
                dist.broadcast(indices, 0)
                if rank != 0:
                    dataset.indices = indices.cpu().numpy()

        # Update mosaic border
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(4, device=device)  # mean losses
        if rank != -1:
            dataloader.sampler.set_epoch(epoch)
        pbar = enumerate(dataloader)
        logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))
        if rank in [-1, 0]:
            pbar = tqdm(pbar, total=nb)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # model.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])

            # Multi-scale
            if opt.multi_scale:
                # NOTE(review): random.randrange with float bounds is an error on
                # Python 3.10+ — confirm target interpreter before enabling.
                sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

            # Forward
            with amp.autocast(enabled=cuda):
                pred = model(imgs)  # forward
                loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
                if rank != -1:
                    loss *= opt.world_size  # gradient averaged between devices in DDP mode
                if opt.quad:
                    loss *= 4.

            # Backward
            scaler.scale(loss).backward()

            # Optimize
            if ni % accumulate == 0:
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)

            # Print
            if rank in [-1, 0]:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                s = ('%10s' * 2 + '%10.6g' * 6) % (
                    '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
                pbar.set_description(s)

                # Plot
                if plots and ni < 3:
                    f = save_dir / f'train_batch{ni}.jpg'  # filename
                    Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
                    if tb_writer:
                        tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs, strict=False), [])  # model graph
                        # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
                elif plots and ni == 10 and wandb_logger.wandb:
                    wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in
                                                  save_dir.glob('train*.jpg') if x.exists()]})
            # end batch ------------------------------------------------------------------------------------------------
        # end epoch ----------------------------------------------------------------------------------------------------

        # Scheduler
        lr = [x['lr'] for x in optimizer.param_groups]  # for tensorboard
        scheduler.step()

        # DDP process 0 or single-GPU
        if rank in [-1, 0]:
            # mAP
            ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
            final_epoch = epoch + 1 == epochs
            if (epoch + 1) % opt.save_period != 0:
                # Off-period epoch: log training losses only, no validation/save.
                wandb_logger.current_epoch = epoch + 1
                # Log
                tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
                        'x/lr0', 'x/lr1', 'x/lr2']  # params
                for x, tag in zip(list(mloss[:-1]) + lr, tags):
                    if tb_writer:
                        tb_writer.add_scalar(tag, x, epoch)  # tensorboard
                    if wandb_logger.wandb:
                        wandb_logger.log({tag: x})  # W&B
                wandb_logger.end_epoch()
                # Write
                with open(results_file, 'a') as f:
                    f.write(s + '\n')  # append metrics, val_loss
            else:
                if not opt.notest or final_epoch:  # Calculate mAP
                    wandb_logger.current_epoch = epoch + 1
                    results, maps, times = predict.test(data_dict,
                                                        batch_size=batch_size * 2,
                                                        imgsz=imgsz_test,
                                                        model=ema.ema,
                                                        single_cls=opt.single_cls,
                                                        dataloader=testloader,
                                                        save_dir=save_dir,
                                                        save_json=is_coco and final_epoch,
                                                        verbose=nc < 50,
                                                        plots=plots and final_epoch,
                                                        wandb_logger=wandb_logger,
                                                        compute_loss=compute_loss,
                                                        is_coco=is_coco)

                    # Write
                    with open(results_file, 'a') as f:
                        f.write(s + '%10.4g' * 8 % results + '\n')  # append metrics, val_loss

                    # Log
                    tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
                            'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.75',
                            'metrics/mAP_0.5:0.95',
                            'val/box_loss', 'val/obj_loss', 'val/cls_loss',  # val loss
                            'x/lr0', 'x/lr1', 'x/lr2']  # params
                    for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
                        if tb_writer:
                            tb_writer.add_scalar(tag, x, epoch)  # tensorboard
                        if wandb_logger.wandb:
                            wandb_logger.log({tag: x})  # W&B

                    # Update best mAP
                    fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.75, mAP@.5-.95]
                    if fi > best_fitness:
                        best_fitness = fi
                    wandb_logger.end_epoch(best_result=best_fitness == fi)

                    # Save model
                    if (not opt.nosave) or (final_epoch and not opt.evolve):  # if save
                        ckpt = {'epoch': epoch,
                                'best_fitness': best_fitness,
                                'training_results': results_file.read_text(),
                                'model': deepcopy(de_parallel(model)).half(),
                                'ema': deepcopy(ema.ema).half(),
                                'updates': ema.updates,
                                'optimizer': optimizer.state_dict(),
                                'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None}
                        # Save last, best and delete
                        torch.save(ckpt, last)
                        if best_fitness == fi:
                            torch.save(ckpt, best)
                        if wandb_logger.wandb:
                            if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1:
                                wandb_logger.log_model(last.parent, opt, epoch, fi, best_model=best_fitness == fi)
                        del ckpt
        # end epoch ----------------------------------------------------------------------------------------------------
    # end training
    if rank in [-1, 0]:
        logger.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n')
        if plots:
            plot_results(save_dir=save_dir)  # save as results.png
            if wandb_logger.wandb:
                files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
                wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files
                                              if (save_dir / f).exists()]})
        if not opt.evolve:
            if is_coco:  # COCO dataset
                for m in [last, best] if best.exists() else [last]:  # speed, mAP tests
                    results, _, _ = predict.test(opt.data,
                                                 batch_size=batch_size * 2,
                                                 imgsz=imgsz_test,
                                                 conf_thres=0.001,
                                                 iou_thres=0.7,
                                                 model=attempt_load(m, device).half(),
                                                 single_cls=opt.single_cls,
                                                 dataloader=testloader,
                                                 save_dir=save_dir,
                                                 save_json=True,
                                                 plots=False,
                                                 is_coco=is_coco)
            # Strip optimizers
            for f in last, best:
                if f.exists():
                    strip_optimizer(f)  # strip optimizers
            if wandb_logger.wandb:  # Log the stripped model
                wandb_logger.wandb.log_artifact(str(best if best.exists() else last), type='model',
                                                name='run_' + wandb_logger.wandb_run.id + '_model',
                                                aliases=['latest', 'best', 'stripped'])
        wandb_logger.finish_run()
    else:
        dist.destroy_process_group()
    torch.cuda.empty_cache()
    return results


def data_prepare():
    """Build the drowsy-face dataset layout from the raw task03 data.
    (Definition continues beyond this chunk.)"""
    random.seed(100)
    names = ['eye_opened', 'eye_closed', 'mouth_opened', 'mouth_closed', 'face', 'phone', 'cigar']
    path_train_dir = '/DATA/Final_DATA/task03_train'
    new_dir = '../drowsy_face'
    # generate raw_train.json, raw_val.json
    generate_raw_json
= True if generate_raw_json == True: print('generate raw_train.json, raw_val.json') if os.path.exists(new_dir): shutil.rmtree(new_dir) os.makedirs(new_dir + '/images/train') os.makedirs(new_dir + '/images/val') os.makedirs(new_dir + '/labels/train') os.makedirs(new_dir + '/labels/val') with open(path_train_dir + '/labels.json') as f: json_data = json.load(f) json_anno = json_data["annotations"] num_data = len(json_anno) # 273224 val_idx = random.sample(list(range(num_data)), 20000) json_anno_val = [] json_anno_train = [] for idx, json_img in enumerate(tqdm(json_anno)): if idx in val_idx: json_anno_val.append(json_img) else: json_anno_train.append(json_img) json_data_val = {} json_data_val['annotations'] = json_anno_val json_data_train = {} json_data_train['annotations'] = json_anno_train if os.path.isfile(new_dir + '/raw_val.json'): os.remove(new_dir + '/raw_val.json') if os.path.isfile(new_dir + '/raw_train.json'): os.remove(new_dir + '/raw_train.json') with open(new_dir + '/raw_val.json', 'w') as f_val: json.dump(json_data_val, f_val) with open(new_dir + '/raw_train.json', 'w') as f_train: json.dump(json_data_train, f_train) # generate drowsy_face/train, drowsy_face/val generate_drowsy_face = True if generate_drowsy_face == True: print('generate drowsy_face/train, drowsy_face/val') with open(new_dir + '/raw_val.json') as f: json_data = json.load(f) json_anno = json_data["annotations"] for json_img in tqdm(json_anno): img_id = json_img['file_name'] txt_dir = new_dir + '/labels/val/' + img_id.split('.')[0] + '.txt' img_dir = new_dir + '/images/val/' + img_id f_txt = open(txt_dir, 'w') img_ = Image.open(path_train_dir + '/images/' + img_id) img_size = img_.size objects_yolo = '' for img_obj in json_img['objects']: class_id = str(names.index(img_obj['class'])) img_pos = img_obj['position'] xywh = xyxy2xywh(np.array([[img_pos[0]/img_size[0], img_pos[1]/img_size[1], img_pos[2]/img_size[0], img_pos[3]/img_size[1]]]))[0] f_txt.write(f"{class_id} {xywh[0]:.5f} 
{xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label f_txt.close() shutil.copy(path_train_dir + '/images/' + img_id, img_dir) with open(new_dir + '/raw_train.json') as f: json_data = json.load(f) json_anno = json_data["annotations"] for json_img in tqdm(json_anno): img_id = json_img['file_name'] txt_dir = new_dir + '/labels/train/' + img_id.split('.')[0] + '.txt' img_dir = new_dir + '/images/train/' + img_id f_txt = open(txt_dir, 'w') img_ = Image.open(path_train_dir + '/images/' + img_id) img_size = img_.size objects_yolo = '' for img_obj in json_img['objects']: class_id = str(names.index(img_obj['class'])) img_pos = img_obj['position'] xywh = xyxy2xywh(np.array([[img_pos[0]/img_size[0], img_pos[1]/img_size[1], img_pos[2]/img_size[0], img_pos[3]/img_size[1]]]))[0] f_txt.write(f"{class_id} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label f_txt.close() shutil.copy(path_train_dir + '/images/' + img_id, img_dir) # generate diet_train.json generate_diet_json = True if generate_diet_json == True: print('generate diet_train.json') json_anno_diet = [] with open(path_train_dir + '/labels.json') as f: json_data = json.load(f) json_anno = json_data["annotations"] fidx = 0 for img_info in tqdm(json_anno): file_name = img_info['file_name'] cigar_check = 0 phone_check = 0 eye_closed_check = 0 mouth_closed_check = 0 mouth_opened_check = 0 for annotation_info in img_info['objects']: if annotation_info['class'] == 'cigar': cigar_check = 1 elif annotation_info['class'] == 'phone': phone_check = 1 elif annotation_info['class'] == 'eye_closed': eye_closed_check = 1 elif annotation_info['class'] == 'mouth_closed': mouth_closed_check = 1 elif annotation_info['class'] == 'mouth_opened': mouth_opened_check = 1 if cigar_check or phone_check: json_anno_diet.append(img_info) elif eye_closed_check and mouth_closed_check: json_anno_diet.append(img_info) elif eye_closed_check and mouth_opened_check: json_anno_diet.append(img_info) elif mouth_opened_check: fidx = 
fidx + 1 if fidx % 3 == 0: json_anno_diet.append(img_info) json_data_diet = {} json_data_diet['annotations'] = json_anno_diet if os.path.isfile(new_dir + '/diet_train.json'): os.remove(new_dir + '/diet_train.json') with open(new_dir + '/diet_train.json', 'w') as f_diet: json.dump(json_data_diet, f_diet) # generate drowsy_face_diet/train generate_drowsy_face_diet = True if generate_drowsy_face_diet == True: print('generate drowsy_face_diet/train') new_dir_diet = '../drowsy_face_diet' if os.path.exists(new_dir_diet): shutil.rmtree(new_dir_diet) os.makedirs(new_dir_diet + '/images/train') os.makedirs(new_dir_diet + '/labels/train') with open(new_dir + '/diet_train.json') as f: json_data = json.load(f) json_anno = json_data["annotations"] for json_img in tqdm(json_anno): img_id = json_img['file_name'] txt_dir = new_dir_diet + '/labels/train/' + img_id.split('.')[0] + '.txt' img_dir = new_dir_diet + '/images/train/' + img_id f_txt = open(txt_dir, 'w') img_ = Image.open(path_train_dir + '/images/' + img_id) img_size = img_.size objects_yolo = '' for img_obj in json_img['objects']: class_id = str(names.index(img_obj['class'])) img_pos = img_obj['position'] xywh = xyxy2xywh(np.array([[img_pos[0]/img_size[0], img_pos[1]/img_size[1], img_pos[2]/img_size[0], img_pos[3]/img_size[1]]]))[0] f_txt.write(f"{class_id} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label f_txt.close() shutil.copy(path_train_dir + '/images/' + img_id, img_dir) # count classes def count_classes(annotations): class_dict = { 'eye_opened': 0, 'eye_closed': 0, 'mouth_opened': 0, 'mouth_closed': 0, 'face': 0, 'phone': 0, 'cigar': 0 } for img_info in tqdm(annotations): for annotation_info in img_info['objects']: class_dict[annotation_info['class']] = class_dict[annotation_info['class']] + 1 print(class_dict) count_jsons = True if count_jsons == True: print('count classes') with open(new_dir + '/diet_train.json', 'r') as annotation_file: annotations = json.load(annotation_file) 
annotations = annotations['annotations'] print('diet_train.json') count_classes(annotations) with open(new_dir + '/raw_train.json', 'r') as annotation_file: annotations = json.load(annotation_file) annotations = annotations['annotations'] print('raw_train.json') count_classes(annotations) with open(new_dir + '/raw_val.json', 'r') as annotation_file: annotations = json.load(annotation_file) annotations = annotations['annotations'] print('raw_val.json') count_classes(annotations) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--random_seed', type=int, default=0, help='') parser.add_argument('--weights', type=str, default='', help='initial weights path') parser.add_argument('--cfg', type=str, default='models/hub/yolov5l6.yaml', help='model.yaml path') parser.add_argument('--data', type=str, default='data/drowsy_face.yaml', help='data.yaml path') parser.add_argument('--hyp', type=str, default='data/hyp.scratch-p6.yaml', help='hyperparameters path') parser.add_argument('--batch-size', type=int, default=4, help='total batch size for all GPUs') parser.add_argument('--img-size', nargs='+', type=int, default=[1280, 1280], help='[train, test] image sizes') parser.add_argument('--rect', action='store_true', help='rectangular training') parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') parser.add_argument('--notest', action='store_true', help='only test final epoch') parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check') parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters') parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') parser.add_argument('--cache-images', default='', action='store_true', help='cache images for faster training') parser.add_argument('--image-weights', action='store_true', help='use 
weighted image selection for training') parser.add_argument('--device', default='0', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers') parser.add_argument('--project', default='runs/train', help='save to project/name') parser.add_argument('--entity', default=None, help='W&B entity') parser.add_argument('--name', default='final', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--quad', action='store_true', help='quad dataloader') parser.add_argument('--linear-lr', action='store_true', help='linear LR') parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table') parser.add_argument('--bbox_interval', type=int, default=300, help='Set bounding-box image logging interval for W&B') parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used') ## for baseline training parser.add_argument('--no_data_prepare', action='store_true') parser.add_argument('--epochs', type=int, default=300) parser.add_argument('--epoch_parts', type=int, default=15, help='Log model after every "save_period" epoch') parser.add_argument('--save_period', type=int, default=300, help='Log model after every "save_period" epoch') ## 
for fine-tuning parser.add_argument('--fine_tune', action='store_true', help='fine_tune') parser.add_argument('--epochs_tune', type=int, default=50) parser.add_argument('--epoch_parts_tune', type=int, default=50, help='Log model after every "save_period" epoch') parser.add_argument('--save_period_tune', type=int, default=50, help='Log model after every "save_period" epoch') opt = parser.parse_args() if not opt.no_data_prepare: data_prepare() # Reproducibility torch.manual_seed(opt.random_seed) torch.cuda.manual_seed(opt.random_seed) torch.cuda.manual_seed_all(opt.random_seed) # if use multi-GPU torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False np.random.seed(opt.random_seed) random.seed(opt.random_seed) # Set DDP variables opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1 opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1 set_logging(opt.global_rank) if opt.global_rank in [-1, 0]: check_requirements(exclude=('pycocotools', 'thop')) # Resume wandb_run = check_wandb_resume(opt) if opt.resume and not wandb_run: # resume an interrupted run ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' apriori = opt.global_rank, opt.local_rank with open(Path(ckpt).parent.parent / 'opt.yaml') as f: opt = argparse.Namespace(**yaml.safe_load(f)) # replace opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = \ '', ckpt, True, opt.total_batch_size, *apriori # reinstate logger.info('Resuming training from %s' % ckpt) else: # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml') opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' opt.img_size.extend([opt.img_size[-1]] * (2 - 
len(opt.img_size))) # extend to 2 sizes (train, test) opt.name = 'evolve' if opt.evolve else opt.name opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve)) # DDP mode opt.total_batch_size = opt.batch_size device = select_device(opt.device, batch_size=opt.batch_size) if opt.local_rank != -1: assert torch.cuda.device_count() > opt.local_rank torch.cuda.set_device(opt.local_rank) device = torch.device('cuda', opt.local_rank) dist.init_process_group(backend='nccl', init_method='env://') # distributed backend assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count' assert not opt.image_weights, '--image-weights argument is not compatible with DDP training' opt.batch_size = opt.total_batch_size // opt.world_size # Hyperparameters with open(opt.hyp) as f: hyp = yaml.safe_load(f) # load hyps # Train logger.info(opt) if not opt.evolve: tb_writer = None # init loggers if opt.global_rank in [-1, 0]: prefix = colorstr('tensorboard: ') logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") tb_writer = SummaryWriter(opt.save_dir) # Tensorboard train(hyp, opt, device, tb_writer) print("### base train completed") print("### fine-tuning start") opt.fine_tune = True opt.weights = opt.save_dir + '/weights/last.pt' opt.data = 'data/drowsy_face_tuning.yaml' opt.hyp = 'data/hyp.finetune-simple.yaml' opt.epochs = opt.epochs_tune opt.epoch_parts = opt.epoch_parts_tune opt.save_period = opt.save_period_tune opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test) opt.name = 'evolve' if opt.evolve else opt.name opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve)) # 
Hyperparameters with open(opt.hyp) as f: hyp = yaml.safe_load(f) # load hyps # Train logger.info(opt) if not opt.evolve: tb_writer = None # init loggers if opt.global_rank in [-1, 0]: prefix = colorstr('tensorboard: ') logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") tb_writer = SummaryWriter(opt.save_dir) # Tensorboard train(hyp, opt, device, tb_writer)
import argparse
import logging
import math
import os
import random
import time
from copy import deepcopy
from pathlib import Path
from threading import Thread

import numpy as np
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import torch
import numpy as np
import random

import predict  # import predict.py to get mAP after each epoch
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
    fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
    check_requirements, print_mutation, set_logging, one_cycle, colorstr
from utils.google_utils import attempt_download
from utils.loss import ComputeLoss
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel
from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume

import json
from PIL import Image
import os
import shutil

from os import path
import sys
sys.path.append(path.dirname( path.dirname( path.abspath(__file__) ) ))
from utils.general import xyxy2xywh

logger = logging.getLogger(__name__)


def train(hyp, opt, device, tb_writer=None):
    """Run one full YOLOv5 training loop (base training or fine-tuning).

    Args:
        hyp (dict): hyperparameters loaded from the hyp yaml.
        opt (argparse.Namespace): command-line options (paths, epochs, DDP ranks,
            save_period, fine_tune flag, ...).
        device (torch.device): device selected by select_device().
        tb_writer (SummaryWriter | None): TensorBoard writer, rank 0 only.

    Returns:
        tuple: the last `results` tuple produced by predict.test()
            (precision, recall, mAPs and validation losses).
    """
    logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
    save_dir, epochs, batch_size, total_batch_size, weights, rank = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank

    # Directories
    wdir = save_dir / 'weights'
    wdir.mkdir(parents=True, exist_ok=True)  # make dir
    last = wdir / 'last.pt'
    best = wdir / 'best.pt'
    results_file = save_dir / 'results.txt'

    # Save run settings
    with open(save_dir / 'hyp.yaml', 'w') as f:
        yaml.safe_dump(hyp, f, sort_keys=False)
    with open(save_dir / 'opt.yaml', 'w') as f:
        yaml.safe_dump(vars(opt), f, sort_keys=False)

    # Configure
    # plots = not opt.evolve  # create plots
    plots = True  # create plots
    cuda = device.type != 'cpu'
    init_seeds(1 + rank)
    with open(opt.data) as f:
        data_dict = yaml.safe_load(f)  # data dict

    # Logging- Doing this before checking the dataset. Might update data_dict
    loggers = {'wandb': None}  # loggers dict
    if rank in [-1, 0]:
        opt.hyp = hyp  # add hyperparameters
        run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
        wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict)
        loggers['wandb'] = wandb_logger.wandb
        data_dict = wandb_logger.data_dict
        if wandb_logger.wandb:
            weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp  # WandbLogger might update weights, epochs if resuming

    nc = 1 if opt.single_cls else int(data_dict['nc'])  # number of classes
    names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
    assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check
    is_coco = opt.data.endswith('coco.yaml') and nc == 80  # COCO dataset

    # Model
    pretrained = weights.endswith('.pt')
    if pretrained:
        # with torch_distributed_zero_first(rank):
        #     weights = attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location=device)  # load checkpoint
        model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
        exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else []  # exclude keys
        state_dict = ckpt['model'].float().state_dict()  # to FP32
        state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(state_dict, strict=False)  # load
        logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
    else:
        model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
    with torch_distributed_zero_first(rank):
        check_dataset(data_dict)  # check
    train_path = data_dict['train']
    test_path = data_dict['val']

    # Freeze (backbone layers 1-11 during fine-tuning only)
    # FIX: original list had '6' '7' (missing comma), which Python concatenates
    # to the single string '67' — so layers 6 and 7 were silently never frozen.
    freeze = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11']  # parameter names to freeze (full or partial)
    freeze = ['model.' + number + '.' for number in freeze]
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        if any(x in k for x in freeze) and opt.fine_tune is True:
            print('freezing %s' % k)
            v.requires_grad = False

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / total_batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= total_batch_size * accumulate / nbs  # scale weight_decay
    logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")

    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in model.named_modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
            pg2.append(v.bias)  # biases
        if isinstance(v, nn.BatchNorm2d):
            pg0.append(v.weight)  # no decay
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
            pg1.append(v.weight)  # apply decay

    if opt.adam:
        optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)

    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2

    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
    if opt.linear_lr:
        lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear
    else:
        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # plot_lr_scheduler(optimizer, scheduler, epochs)

    # EMA
    ema = ModelEMA(model) if rank in [-1, 0] else None

    # Resume
    start_epoch, best_fitness = 0, 0.0
    if pretrained:
        # Optimizer
        if ckpt['optimizer'] is not None:
            optimizer.load_state_dict(ckpt['optimizer'])
            best_fitness = ckpt['best_fitness']

        # EMA
        if ema and ckpt.get('ema'):
            ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
            ema.updates = ckpt['updates']

        # Results
        if ckpt.get('training_results') is not None:
            results_file.write_text(ckpt['training_results'])  # write results.txt

        # Epochs
        start_epoch = ckpt['epoch'] + 1
        if opt.resume:
            assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
        if epochs < start_epoch:
            logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
                        (weights, ckpt['epoch'], epochs))
            epochs += ckpt['epoch']  # finetune additional epochs

        del ckpt, state_dict

    # Image sizes
    gs = max(int(model.stride.max()), 32)  # grid size (max stride)
    nl = model.model[-1].nl  # number of detection layers (used for scaling hyp['obj'])
    imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size]  # verify imgsz are gs-multiples

    # DP mode
    if cuda and rank == -1 and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and rank != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        logger.info('Using SyncBatchNorm()')

    # Trainloader
    dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
                                            hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
                                            world_size=opt.world_size, workers=opt.workers,
                                            image_weights=opt.image_weights, quad=opt.quad,
                                            prefix=colorstr('train: '), task='train', epoch_parts=opt.epoch_parts)
    mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
    nb = len(dataloader)  # number of batches
    assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)

    # Process 0
    if rank in [-1, 0]:
        testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt,  # testloader
                                       hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1,
                                       world_size=opt.world_size, workers=opt.workers,
                                       pad=0.5, prefix=colorstr('val: '))[0]

        if not opt.resume:
            labels = np.concatenate(dataset.labels, 0)
            c = torch.tensor(labels[:, 0])  # classes
            # cf = torch.bincount(c.long(), minlength=nc) + 1.  # frequency
            # model._initialize_biases(cf.to(device))
            if plots:
                plot_labels(labels, names, save_dir, loggers)
                if tb_writer:
                    tb_writer.add_histogram('classes', c, 0)

            # Anchors
            if not opt.noautoanchor:
                check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
            model.half().float()  # pre-reduce anchor precision

    # DDP mode
    if cuda and rank != -1:
        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank,
                    # nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698
                    find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules()))

    # Model parameters
    hyp['box'] *= 3. / nl  # scale to layers
    hyp['cls'] *= nc / 80. * 3. / nl  # scale to classes and layers
    hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl  # scale to image size and layers
    hyp['label_smoothing'] = opt.label_smoothing
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.gr = 1.0  # iou loss ratio (obj_loss = 1.0 or iou)
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
    model.names = names

    # Start training
    t0 = time.time()
    nw = max(round(hyp['warmup_epochs'] * nb), 1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
    # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = amp.GradScaler(enabled=cuda)
    compute_loss = ComputeLoss(model)  # init loss class
    logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n'
                f'Using {dataloader.num_workers} dataloader workers\n'
                f'Logging results to {save_dir}\n'
                f'Starting training for {epochs} epochs...')
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        model.train()

        # Update image weights (optional)
        if opt.image_weights:
            # Generate indices
            if rank in [-1, 0]:
                cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
                iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
                dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx
            # Broadcast if DDP
            if rank != -1:
                indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
                dist.broadcast(indices, 0)
                if rank != 0:
                    dataset.indices = indices.cpu().numpy()

        # Update mosaic border
        # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
        # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders

        mloss = torch.zeros(4, device=device)  # mean losses
        if rank != -1:
            dataloader.sampler.set_epoch(epoch)
        pbar = enumerate(dataloader)
        logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))
        if rank in [-1, 0]:
            pbar = tqdm(pbar, total=nb)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                # model.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
                accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])

            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

            # Forward
            with amp.autocast(enabled=cuda):
                pred = model(imgs)  # forward
                loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
                if rank != -1:
                    loss *= opt.world_size  # gradient averaged between devices in DDP mode
                if opt.quad:
                    loss *= 4.

            # Backward
            scaler.scale(loss).backward()

            # Optimize
            if ni % accumulate == 0:
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)

            # Print
            if rank in [-1, 0]:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                s = ('%10s' * 2 + '%10.6g' * 6) % (
                    '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
                pbar.set_description(s)

                # Plot
                if plots and ni < 3:
                    f = save_dir / f'train_batch{ni}.jpg'  # filename
                    Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
                    if tb_writer:
                        tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs, strict=False), [])  # model graph
                        # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
                elif plots and ni == 10 and wandb_logger.wandb:
                    wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in
                                                  save_dir.glob('train*.jpg') if x.exists()]})

            # end batch ------------------------------------------------------------------------------------------------
        # end epoch ----------------------------------------------------------------------------------------------------

        # Scheduler
        lr = [x['lr'] for x in optimizer.param_groups]  # for tensorboard
        scheduler.step()

        # DDP process 0 or single-GPU
        if rank in [-1, 0]:
            # mAP
            ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
            final_epoch = epoch + 1 == epochs
            if (epoch+1) % opt.save_period != 0:
                wandb_logger.current_epoch = epoch + 1
                # Log
                tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
                        'x/lr0', 'x/lr1', 'x/lr2']  # params
                for x, tag in zip(list(mloss[:-1]) + lr, tags):
                    if tb_writer:
                        tb_writer.add_scalar(tag, x, epoch)  # tensorboard
                    if wandb_logger.wandb:
                        wandb_logger.log({tag: x})  # W&B
                wandb_logger.end_epoch()
                # Write
                with open(results_file, 'a') as f:
                    f.write(s + '\n')  # append metrics, val_loss
            else:
                if not opt.notest or final_epoch:  # Calculate mAP
                    wandb_logger.current_epoch = epoch + 1
                    results, maps, times = predict.test(data_dict,
                                                        batch_size=batch_size * 2,
                                                        imgsz=imgsz_test,
                                                        model=ema.ema,
                                                        single_cls=opt.single_cls,
                                                        dataloader=testloader,
                                                        save_dir=save_dir,
                                                        save_json=is_coco and final_epoch,
                                                        verbose=nc < 50,
                                                        plots=plots and final_epoch,
                                                        wandb_logger=wandb_logger,
                                                        compute_loss=compute_loss,
                                                        is_coco=is_coco)

                # Write
                with open(results_file, 'a') as f:
                    f.write(s + '%10.4g' * 8 % results + '\n')  # append metrics, val_loss

                # Log
                tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
                        'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.75',
                        'metrics/mAP_0.5:0.95', 'val/box_loss', 'val/obj_loss', 'val/cls_loss',  # val loss
                        'x/lr0', 'x/lr1', 'x/lr2']  # params
                for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
                    if tb_writer:
                        tb_writer.add_scalar(tag, x, epoch)  # tensorboard
                    if wandb_logger.wandb:
                        wandb_logger.log({tag: x})  # W&B

                # Update best mAP
                fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.75, mAP@.5-.95]
                if fi > best_fitness:
                    best_fitness = fi
                wandb_logger.end_epoch(best_result=best_fitness == fi)

                # Save model
                if (not opt.nosave) or (final_epoch and not opt.evolve):  # if save
                    ckpt = {'epoch': epoch,
                            'best_fitness': best_fitness,
                            'training_results': results_file.read_text(),
                            'model': deepcopy(de_parallel(model)).half(),
                            'ema': deepcopy(ema.ema).half(),
                            'updates': ema.updates,
                            'optimizer': optimizer.state_dict(),
                            'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None}

                    # Save last, best and delete
                    torch.save(ckpt, last)
                    if best_fitness == fi:
                        torch.save(ckpt, best)
                    if wandb_logger.wandb:
                        if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1:
                            wandb_logger.log_model(
                                last.parent, opt, epoch, fi, best_model=best_fitness == fi)
                    del ckpt

        # end epoch ----------------------------------------------------------------------------------------------------
    # end training
    if rank in [-1, 0]:
        logger.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n')
        if plots:
            plot_results(save_dir=save_dir)  # save as results.png
            if wandb_logger.wandb:
                files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
                wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files
                                              if (save_dir / f).exists()]})

        if not opt.evolve:
            if is_coco:  # COCO dataset
                for m in [last, best] if best.exists() else [last]:  # speed, mAP tests
                    results, _, _ = predict.test(opt.data,
                                                 batch_size=batch_size * 2,
                                                 imgsz=imgsz_test,
                                                 conf_thres=0.001,
                                                 iou_thres=0.7,
                                                 model=attempt_load(m, device).half(),
                                                 single_cls=opt.single_cls,
                                                 dataloader=testloader,
                                                 save_dir=save_dir,
                                                 save_json=True,
                                                 plots=False,
                                                 is_coco=is_coco)

            # Strip optimizers
            for f in last, best:
                if f.exists():
                    strip_optimizer(f)  # strip optimizers
            if wandb_logger.wandb:  # Log the stripped model
                wandb_logger.wandb.log_artifact(str(best if best.exists() else last), type='model',
                                                name='run_' + wandb_logger.wandb_run.id + '_model',
                                                aliases=['latest', 'best', 'stripped'])
        wandb_logger.finish_run()
    else:
        dist.destroy_process_group()

    torch.cuda.empty_cache()
    return results


def data_prepare():
    """Build the drowsy-face YOLO datasets from the raw competition labels."""
    random.seed(100)
    names = ['eye_opened', 'eye_closed', 'mouth_opened', 'mouth_closed', 'face', 'phone', 'cigar']
    path_train_dir = '/DATA/Final_DATA/task03_train'
    new_dir = '../drowsy_face'

    # generate raw_train.json, raw_val.json
    generate_raw_json
= True if generate_raw_json == True: print('generate raw_train.json, raw_val.json') if os.path.exists(new_dir): shutil.rmtree(new_dir) os.makedirs(new_dir + '/images/train') os.makedirs(new_dir + '/images/val') os.makedirs(new_dir + '/labels/train') os.makedirs(new_dir + '/labels/val') with open(path_train_dir + '/labels.json') as f: json_data = json.load(f) json_anno = json_data["annotations"] num_data = len(json_anno) # 273224 val_idx = random.sample(list(range(num_data)), 20000) json_anno_val = [] json_anno_train = [] for idx, json_img in enumerate(tqdm(json_anno)): if idx in val_idx: json_anno_val.append(json_img) else: json_anno_train.append(json_img) json_data_val = {} json_data_val['annotations'] = json_anno_val json_data_train = {} json_data_train['annotations'] = json_anno_train if os.path.isfile(new_dir + '/raw_val.json'): os.remove(new_dir + '/raw_val.json') if os.path.isfile(new_dir + '/raw_train.json'): os.remove(new_dir + '/raw_train.json') with open(new_dir + '/raw_val.json', 'w') as f_val: json.dump(json_data_val, f_val) with open(new_dir + '/raw_train.json', 'w') as f_train: json.dump(json_data_train, f_train) # generate drowsy_face/train, drowsy_face/val generate_drowsy_face = True if generate_drowsy_face == True: print('generate drowsy_face/train, drowsy_face/val') with open(new_dir + '/raw_val.json') as f: json_data = json.load(f) json_anno = json_data["annotations"] for json_img in tqdm(json_anno): img_id = json_img['file_name'] txt_dir = new_dir + '/labels/val/' + img_id.split('.')[0] + '.txt' img_dir = new_dir + '/images/val/' + img_id f_txt = open(txt_dir, 'w') img_ = Image.open(path_train_dir + '/images/' + img_id) img_size = img_.size objects_yolo = '' for img_obj in json_img['objects']: class_id = str(names.index(img_obj['class'])) img_pos = img_obj['position'] xywh = xyxy2xywh(np.array([[img_pos[0]/img_size[0], img_pos[1]/img_size[1], img_pos[2]/img_size[0], img_pos[3]/img_size[1]]]))[0] f_txt.write(f"{class_id} {xywh[0]:.5f} 
{xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label f_txt.close() shutil.copy(path_train_dir + '/images/' + img_id, img_dir) with open(new_dir + '/raw_train.json') as f: json_data = json.load(f) json_anno = json_data["annotations"] for json_img in tqdm(json_anno): img_id = json_img['file_name'] txt_dir = new_dir + '/labels/train/' + img_id.split('.')[0] + '.txt' img_dir = new_dir + '/images/train/' + img_id f_txt = open(txt_dir, 'w') img_ = Image.open(path_train_dir + '/images/' + img_id) img_size = img_.size objects_yolo = '' for img_obj in json_img['objects']: class_id = str(names.index(img_obj['class'])) img_pos = img_obj['position'] xywh = xyxy2xywh(np.array([[img_pos[0]/img_size[0], img_pos[1]/img_size[1], img_pos[2]/img_size[0], img_pos[3]/img_size[1]]]))[0] f_txt.write(f"{class_id} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label f_txt.close() shutil.copy(path_train_dir + '/images/' + img_id, img_dir) # generate diet_train.json generate_diet_json = True if generate_diet_json == True: print('generate diet_train.json') json_anno_diet = [] with open(path_train_dir + '/labels.json') as f: json_data = json.load(f) json_anno = json_data["annotations"] fidx = 0 for img_info in tqdm(json_anno): file_name = img_info['file_name'] cigar_check = 0 phone_check = 0 eye_closed_check = 0 mouth_closed_check = 0 mouth_opened_check = 0 for annotation_info in img_info['objects']: if annotation_info['class'] == 'cigar': cigar_check = 1 elif annotation_info['class'] == 'phone': phone_check = 1 elif annotation_info['class'] == 'eye_closed': eye_closed_check = 1 elif annotation_info['class'] == 'mouth_closed': mouth_closed_check = 1 elif annotation_info['class'] == 'mouth_opened': mouth_opened_check = 1 if cigar_check or phone_check: json_anno_diet.append(img_info) elif eye_closed_check and mouth_closed_check: json_anno_diet.append(img_info) elif eye_closed_check and mouth_opened_check: json_anno_diet.append(img_info) elif mouth_opened_check: fidx = 
fidx + 1 if fidx % 3 == 0: json_anno_diet.append(img_info) json_data_diet = {} json_data_diet['annotations'] = json_anno_diet if os.path.isfile(new_dir + '/diet_train.json'): os.remove(new_dir + '/diet_train.json') with open(new_dir + '/diet_train.json', 'w') as f_diet: json.dump(json_data_diet, f_diet) # generate drowsy_face_diet/train generate_drowsy_face_diet = True if generate_drowsy_face_diet == True: print('generate drowsy_face_diet/train') new_dir_diet = '../drowsy_face_diet' if os.path.exists(new_dir_diet): shutil.rmtree(new_dir_diet) os.makedirs(new_dir_diet + '/images/train') os.makedirs(new_dir_diet + '/labels/train') with open(new_dir + '/diet_train.json') as f: json_data = json.load(f) json_anno = json_data["annotations"] for json_img in tqdm(json_anno): img_id = json_img['file_name'] txt_dir = new_dir_diet + '/labels/train/' + img_id.split('.')[0] + '.txt' img_dir = new_dir_diet + '/images/train/' + img_id f_txt = open(txt_dir, 'w') img_ = Image.open(path_train_dir + '/images/' + img_id) img_size = img_.size objects_yolo = '' for img_obj in json_img['objects']: class_id = str(names.index(img_obj['class'])) img_pos = img_obj['position'] xywh = xyxy2xywh(np.array([[img_pos[0]/img_size[0], img_pos[1]/img_size[1], img_pos[2]/img_size[0], img_pos[3]/img_size[1]]]))[0] f_txt.write(f"{class_id} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label f_txt.close() shutil.copy(path_train_dir + '/images/' + img_id, img_dir) # count classes def count_classes(annotations): class_dict = { 'eye_opened': 0, 'eye_closed': 0, 'mouth_opened': 0, 'mouth_closed': 0, 'face': 0, 'phone': 0, 'cigar': 0 } for img_info in tqdm(annotations): for annotation_info in img_info['objects']: class_dict[annotation_info['class']] = class_dict[annotation_info['class']] + 1 print(class_dict) count_jsons = True if count_jsons == True: print('count classes') with open(new_dir + '/diet_train.json', 'r') as annotation_file: annotations = json.load(annotation_file) 
annotations = annotations['annotations'] print('diet_train.json') count_classes(annotations) with open(new_dir + '/raw_train.json', 'r') as annotation_file: annotations = json.load(annotation_file) annotations = annotations['annotations'] print('raw_train.json') count_classes(annotations) with open(new_dir + '/raw_val.json', 'r') as annotation_file: annotations = json.load(annotation_file) annotations = annotations['annotations'] print('raw_val.json') count_classes(annotations) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--random_seed', type=int, default=0, help='') parser.add_argument('--weights', type=str, default='', help='initial weights path') parser.add_argument('--cfg', type=str, default='models/hub/yolov5l6.yaml', help='model.yaml path') parser.add_argument('--data', type=str, default='data/drowsy_face.yaml', help='data.yaml path') parser.add_argument('--hyp', type=str, default='data/hyp.scratch-p6.yaml', help='hyperparameters path') parser.add_argument('--batch-size', type=int, default=4, help='total batch size for all GPUs') parser.add_argument('--img-size', nargs='+', type=int, default=[1280, 1280], help='[train, test] image sizes') parser.add_argument('--rect', action='store_true', help='rectangular training') parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') parser.add_argument('--notest', action='store_true', help='only test final epoch') parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check') parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters') parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') parser.add_argument('--cache-images', default='', action='store_true', help='cache images for faster training') parser.add_argument('--image-weights', action='store_true', help='use 
weighted image selection for training') parser.add_argument('--device', default='0', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers') parser.add_argument('--project', default='runs/train', help='save to project/name') parser.add_argument('--entity', default=None, help='W&B entity') parser.add_argument('--name', default='final', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--quad', action='store_true', help='quad dataloader') parser.add_argument('--linear-lr', action='store_true', help='linear LR') parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table') parser.add_argument('--bbox_interval', type=int, default=300, help='Set bounding-box image logging interval for W&B') parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used') ## for baseline training parser.add_argument('--no_data_prepare', action='store_true') parser.add_argument('--epochs', type=int, default=300) parser.add_argument('--epoch_parts', type=int, default=15, help='Log model after every "save_period" epoch') parser.add_argument('--save_period', type=int, default=300, help='Log model after every "save_period" epoch') ## 
for fine-tuning parser.add_argument('--fine_tune', action='store_true', help='fine_tune') parser.add_argument('--epochs_tune', type=int, default=50) parser.add_argument('--epoch_parts_tune', type=int, default=50, help='Log model after every "save_period" epoch') parser.add_argument('--save_period_tune', type=int, default=50, help='Log model after every "save_period" epoch') opt = parser.parse_args() if not opt.no_data_prepare: data_prepare() # Reproducibility torch.manual_seed(opt.random_seed) torch.cuda.manual_seed(opt.random_seed) torch.cuda.manual_seed_all(opt.random_seed) # if use multi-GPU torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False np.random.seed(opt.random_seed) random.seed(opt.random_seed) # Set DDP variables opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1 opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1 set_logging(opt.global_rank) if opt.global_rank in [-1, 0]: check_requirements(exclude=('pycocotools', 'thop')) # Resume wandb_run = check_wandb_resume(opt) if opt.resume and not wandb_run: # resume an interrupted run ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' apriori = opt.global_rank, opt.local_rank with open(Path(ckpt).parent.parent / 'opt.yaml') as f: opt = argparse.Namespace(**yaml.safe_load(f)) # replace opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = \ '', ckpt, True, opt.total_batch_size, *apriori # reinstate logger.info('Resuming training from %s' % ckpt) else: # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml') opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' opt.img_size.extend([opt.img_size[-1]] * (2 - 
len(opt.img_size))) # extend to 2 sizes (train, test) opt.name = 'evolve' if opt.evolve else opt.name opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve)) # DDP mode opt.total_batch_size = opt.batch_size device = select_device(opt.device, batch_size=opt.batch_size) if opt.local_rank != -1: assert torch.cuda.device_count() > opt.local_rank torch.cuda.set_device(opt.local_rank) device = torch.device('cuda', opt.local_rank) dist.init_process_group(backend='nccl', init_method='env://') # distributed backend assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count' assert not opt.image_weights, '--image-weights argument is not compatible with DDP training' opt.batch_size = opt.total_batch_size // opt.world_size # Hyperparameters with open(opt.hyp) as f: hyp = yaml.safe_load(f) # load hyps # Train logger.info(opt) if not opt.evolve: tb_writer = None # init loggers if opt.global_rank in [-1, 0]: prefix = colorstr('tensorboard: ') logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") tb_writer = SummaryWriter(opt.save_dir) # Tensorboard train(hyp, opt, device, tb_writer) print("### base train completed") print("### fine-tuning start") opt.fine_tune = True opt.weights = opt.save_dir + '/weights/last.pt' opt.data = 'data/drowsy_face_tuning.yaml' opt.hyp = 'data/hyp.finetune-simple.yaml' opt.epochs = opt.epochs_tune opt.epoch_parts = opt.epoch_parts_tune opt.save_period = opt.save_period_tune opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test) opt.name = 'evolve' if opt.evolve else opt.name opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve)) # 
Hyperparameters with open(opt.hyp) as f: hyp = yaml.safe_load(f) # load hyps # Train logger.info(opt) if not opt.evolve: tb_writer = None # init loggers if opt.global_rank in [-1, 0]: prefix = colorstr('tensorboard: ') logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") tb_writer = SummaryWriter(opt.save_dir) # Tensorboard train(hyp, opt, device, tb_writer)
import concurrent.futures
import secrets
from enum import Enum
from ipaddress import ip_address
from typing import Tuple, Set, Dict, Callable
from urllib import parse

import pytz
import urllib3

from CommonServerUserPython import *  # noqa
from CommonServerPython import *  # noqa # pylint: disable=unused-wildcard-import

# Suppress urllib3's insecure-request warnings (self-signed QRadar certs are common).
urllib3.disable_warnings()  # pylint: disable=no-member

''' ADVANCED GLOBAL PARAMETERS '''

# How many sample events are kept in the integration context.
SAMPLE_SIZE = 2  # number of samples to store in integration context
# Polling cadence and failure tolerance for the events-fetch loop.
EVENTS_INTERVAL_SECS = 15  # interval between events polling
EVENTS_FAILURE_LIMIT = 3  # amount of consecutive failures events fetch will tolerate
FAILURE_SLEEP = 15  # sleep between consecutive failures events fetch
FETCH_SLEEP = 60  # sleep between fetches
# Offense IP-enrichment batching: at most BATCH_SIZE * 10 IPs enriched per offense.
BATCH_SIZE = 100  # batch size used for offense ip enrichment
OFF_ENRCH_LIMIT = BATCH_SIZE * 10  # max amount of IPs to enrich per offense
MAX_WORKERS = 8  # max concurrent workers used for events enriching
# String flags ('true'/'false') controlling optional offense enrichment.
DOMAIN_ENRCH_FLG = 'true'  # when set to true, will try to enrich offense and assets with domain names
RULES_ENRCH_FLG = 'true'  # when set to true, will try to enrich offense with rule names
# Retry policy when searching for the events of an offense.
MAX_FETCH_EVENT_RETIRES = 3  # max iteration to try search the events of an offense
SLEEP_FETCH_EVENT_RETIRES = 10  # sleep between iteration to try search the events of an offense
MAX_NUMBER_OF_OFFENSES_TO_CHECK_SEARCH = 5  # Number of offenses to check during mirroring if search was completed.
DEFAULT_EVENTS_TIMEOUT = 30 # default timeout for the events enrichment in minutes PROFILING_DUMP_ROWS_LIMIT = 20 ADVANCED_PARAMETERS_STRING_NAMES = [ 'DOMAIN_ENRCH_FLG', 'RULES_ENRCH_FLG', ] ADVANCED_PARAMETER_INT_NAMES = [ 'EVENTS_INTERVAL_SECS', 'EVENTS_FAILURE_LIMIT', 'FAILURE_SLEEP', 'FETCH_SLEEP', 'BATCH_SIZE', 'OFF_ENRCH_LIMIT', 'MAX_WORKERS', 'MAX_FETCH_EVENT_RETIRES', 'SLEEP_FETCH_EVENT_RETIRES', 'DEFAULT_EVENTS_TIMEOUT', 'PROFILING_DUMP_ROWS_LIMIT', ] ''' CONSTANTS ''' API_USERNAME = '_api_token_key' RESET_KEY = 'reset' LAST_FETCH_KEY = 'id' MINIMUM_API_VERSION = 10.1 DEFAULT_RANGE_VALUE = '0-49' DEFAULT_TIMEOUT_VALUE = '35' DEFAULT_LIMIT_VALUE = 50 MAXIMUM_MIRROR_LIMIT = 100 DEFAULT_EVENTS_LIMIT = 20 MAXIMUM_OFFENSES_PER_FETCH = 50 DEFAULT_OFFENSES_PER_FETCH = 20 DEFAULT_MIRRORING_DIRECTION = 'No Mirroring' MIRROR_OFFENSE_AND_EVENTS = 'Mirror Offense and Events' MIRROR_DIRECTION: Dict[str, Optional[str]] = { 'No Mirroring': None, 'Mirror Offense': 'In', MIRROR_OFFENSE_AND_EVENTS: 'In' } MIRRORED_OFFENSES_CTX_KEY = 'mirrored_offenses' UPDATED_MIRRORED_OFFENSES_CTX_KEY = 'updated_mirrored_offenses' RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY = 'resubmitted_mirrored_offenses' UTC_TIMEZONE = pytz.timezone('utc') ID_QUERY_REGEX = re.compile(r'(?:\s+|^)id((\s)*)>(=?)((\s)*)((\d)+)(?:\s+|$)') ASCENDING_ID_ORDER = '+id' EXECUTOR = concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) ''' OUTPUT FIELDS REPLACEMENT MAPS ''' OFFENSE_OLD_NEW_NAMES_MAP = { 'credibility': 'Credibility', 'relevance': 'Relevance', 'severity': 'Severity', 'assigned_to': 'AssignedTo', 'destination_networks': 'DestinationHostname', 'status': 'Status', 'closing_user': 'ClosingUser', 'closing_reason_id': 'ClosingReason', 'close_time': 'CloseTime', 'categories': 'Categories', 'follow_up': 'Followup', 'id': 'ID', 'description': 'Description', 'source_address_ids': 'SourceAddress', 'local_destination_address_ids': 'DestinationAddress', 'remote_destination_count': 'RemoteDestinationCount', 
'start_time': 'StartTime', 'event_count': 'EventCount', 'flow_count': 'FlowCount', 'offense_source': 'OffenseSource', 'magnitude': 'Magnitude', 'last_updated_time': 'LastUpdatedTime', 'offense_type': 'OffenseType', 'protected': 'Protected', 'LinkToOffense': 'LinkToOffense', 'rules': 'Rules', 'domain_name': 'DomainName', 'assets': 'Assets' } CLOSING_REASONS_OLD_NEW_MAP = { 'id': 'ID', 'text': 'Name', 'is_reserved': 'IsReserved', 'is_deleted': 'IsDeleted' } NOTES_OLD_NEW_MAP = { 'id': 'ID', 'note_text': 'Text', 'create_time': 'CreateTime', 'username': 'CreatedBy' } RULES_OLD_NEW_MAP = { 'owner': 'Owner', 'base_host_id': 'BaseHostID', 'capacity_timestamp': 'CapacityTimestamp', 'origin': 'Origin', 'creation_date': 'CreationDate', 'type': 'Type', 'enabled': 'Enabled', 'modification_date': 'ModificationDate', 'name': 'Name', 'average_capacity': 'AverageCapacity', 'id': 'ID', 'base_capacity': 'BaseCapacity' } RULES_GROUP_OLD_NEW_MAP = { 'owner': 'Owner', 'modified_time': 'ModifiedTime', 'level': 'Level', 'name': 'Name', 'description': 'Description', 'id': 'ID', 'child_groups': 'ChildGroups', 'child_items': 'ChildItems', 'type': 'Type', 'parent_id': 'ParentID' } ASSET_OLD_NEW_MAP = { 'vulnerability_count': 'VulnerabilityCount', 'interfaces': 'Interfaces', 'risk_score_sum': 'RiskScoreSum', 'hostnames': 'Hostnames', 'id': 'ID', 'users': 'Users', 'domain_id': 'DomainID', 'properties': 'Properties', 'products': 'Products' } SEARCH_OLD_NEW_MAP = {'search_id': 'ID', 'status': 'Status'} REFERENCE_SETS_OLD_NEW_MAP = { 'number_of_elements': 'NumberOfElements', 'name': 'Name', 'creation_time': 'CreationTime', 'element_type': 'ElementType', 'time_to_live': 'TimeToLive', 'timeout_type': 'TimeoutType', 'data': 'Data', } REFERENCE_SET_DATA_OLD_NEW_MAP = { 'last_seen': 'LastSeen', 'source': 'Source', 'value': 'Value', 'first_seen': 'FirstSeen' } DOMAIN_OLD_NEW_MAP = { 'asset_scanner_ids': 'AssetScannerIDs', 'custom_properties': 'CustomProperties', 'deleted': 'Deleted', 'description': 
'Description', 'event_collector_ids': 'EventCollectorIDs', 'flow_collector_ids': 'FlowCollectorIDs', 'flow_source_ids': 'FlowSourceIDs', 'id': 'ID', 'log_source_ids': 'LogSourceIDs', 'log_source_group_ids': 'LogSourceGroupIDs', 'name': 'Name', 'qvm_scanner_ids': 'QVMScannerIDs', 'tenant_id': 'TenantID' } SAVED_SEARCH_OLD_NEW_MAP = { 'owner': 'Owner', 'description': 'Description', 'creation_date': 'CreationDate', 'uid': 'UID', 'database': 'Database', 'is_quick_search': 'QuickSearch', 'name': 'Name', 'modified_date': 'ModifiedDate', 'id': 'ID', 'aql': 'AQL', 'is_shared': 'IsShared' } IP_GEOLOCATION_OLD_NEW_MAP = { 'continent': 'Continent', 'traits': 'Traits', 'geo_json': 'Geolocation', 'city': 'City', 'ip_address': 'IPAddress', 'represented_country': 'RepresentedCountry', 'registered_country': 'RegisteredCountry', 'is_local': 'IsLocalCountry', 'location': 'Location', 'postal': 'Postal', 'physical_country': 'PhysicalCountry', 'subdivisions': 'SubDivisions' } LOG_SOURCES_OLD_NEW_MAP = { 'sending_ip': 'SendingIP', 'internal': 'Internal', 'protocol_parameters': 'ProtocolParameters', 'description': 'Description', 'enabled': 'Enabled', 'group_ids': 'GroupIDs', 'credibility': 'Credibility', 'id': 'ID', 'protocol_type_id': 'ProtocolTypeID', 'creation_date': 'CreationDate', 'name': 'Name', 'modified_date': 'ModifiedDate', 'auto_discovered': 'AutoDiscovered', 'type_id': 'TypeID', 'last_event_time': 'LastEventTime', 'gateway': 'Gateway', 'status': 'Status' } USECS_ENTRIES = {'last_persisted_time', 'start_time', 'close_time', 'create_time', 'creation_time', 'creation_date', 'last_updated_time', 'first_persisted_time', 'modification_date', 'last_seen', 'first_seen', 'starttime', 'devicetime', 'last_reported', 'created', 'last_seen_profiler', 'last_seen_scanner', 'first_seen_scanner', 'first_seen_profiler', 'modified_time', 'last_event_time', 'modified_date', 'first_event_flow_seen', 'last_event_flow_seen'} LOCAL_DESTINATION_IPS_OLD_NEW_MAP = { 'domain_id': 'DomainID', 
'event_flow_count': 'EventFlowCount', 'first_event_flow_seen': 'FirstEventFlowSeen', 'id': 'ID', 'last_event_flow_seen': 'LastEventFlowSeen', 'local_destination_ip': 'LocalDestinationIP', 'magnitude': 'Magnitude', 'network': 'Network', 'offense_ids': 'OffenseIDs', 'source_address_ids': 'SourceAddressIDs' } SOURCE_IPS_OLD_NEW_MAP = { 'domain_id': 'DomainID', 'event_flow_count': 'EventFlowCount', 'first_event_flow_seen': 'FirstEventFlowSeen', 'id': 'ID', 'last_event_flow_seen': 'LastEventFlowSeen', 'local_destination_address_ids': 'LocalDestinationAddressIDs', 'magnitude': 'Magnitude', 'network': 'Network', 'offense_ids': 'OffenseIDs', 'source_ip': 'SourceIP' } ''' ENRICHMENT MAPS ''' ASSET_PROPERTIES_NAME_MAP = { 'Unified Name': 'Name', 'CVSS Collateral Damage Potential': 'AggregatedCVSSScore', 'Weight': 'Weight' } FULL_ASSET_PROPERTIES_NAMES_MAP = { 'Compliance Notes': 'ComplianceNotes', 'Compliance Plan': 'CompliancePlan', 'Location': 'Location', 'Switch ID': 'SwitchID', 'Switch Port ID': 'SwitchPort', 'Group Name': 'GroupName', 'Vulnerabilities': 'Vulnerabilities', } LONG_RUNNING_REQUIRED_PARAMS = {'fetch_mode': 'Fetch mode', 'offenses_per_fetch': 'Number of offenses to pull per API call (max 50)', 'events_limit': 'Maximum number of events per incident.'} ''' ENUMS ''' class FetchMode(Enum): """ Enums for the options of fetching the incidents. 
""" no_events = 'Fetch Without Events' all_events = 'Fetch With All Events' correlations_events_only = 'Fetch Correlation Events Only' ''' CLIENT CLASS ''' class Client(BaseClient): def __init__(self, server: str, verify: bool, proxy: bool, api_version: str, credentials: Dict): username = credentials.get('identifier') password = credentials.get('password') if username == API_USERNAME: self.base_headers = {'Version': api_version, 'SEC': password} auth = None else: auth = (username, password) self.base_headers = {'Version': api_version} base_url = urljoin(server, '/api') super().__init__(base_url=base_url, verify=verify, proxy=proxy, auth=auth) self.password = password self.server = server def http_request(self, method: str, url_suffix: str, params: Optional[Dict] = None, json_data: Optional[Dict] = None, additional_headers: Optional[Dict] = None, timeout: Optional[int] = None): headers = {**additional_headers, **self.base_headers} if additional_headers else self.base_headers return self._http_request( method=method, url_suffix=url_suffix, params=params, json_data=json_data, headers=headers, error_handler=self.qradar_error_handler, timeout=timeout ) @staticmethod def qradar_error_handler(res: requests.Response): """ QRadar error handler for any error occurred during the API request. This function job is to translate the known exceptions returned by QRadar to human readable exception to help the user understand why the request have failed. If error returned is not in the expected error format, raises the exception as is. Args: res (Any): The error response returned by QRadar. Returns: - raises DemistoException. """ err_msg = f'Error in API call [{res.status_code}] - {res.reason}' try: # Try to parse json error response error_entry = res.json() message = error_entry.get('message', '') if 'items=x-y' in message: message = 'Failed to parse Range argument. 
The syntax of the Range argument must follow this pattern:' \ ' x-y' elif 'unauthorized to access' in err_msg or 'No SEC header present in request' in err_msg: message = 'Authorization Error: make sure credentials are correct.' elif 'The specified encryption strength is not available' in err_msg: err_msg = '' message = 'The specified encryption is not available, try using a weaker encryption (AES128).' elif 'User has insufficient capabilities to access this endpoint resource' in message: message = 'The given credentials do not have the needed permissions to perform the call the endpoint' \ f'\n{res.request.path_url}.\n' \ 'Please supply credentials with the needed permissions as can be seen in the integration ' \ 'description, or do not call or enrich offenses with the mentioned endpoint.' err_msg += f'\n{message}' raise DemistoException(err_msg, res=res) except ValueError: err_msg += '\n{}'.format(res.text) raise DemistoException(err_msg, res=res) def offenses_list(self, range_: Optional[str] = None, offense_id: Optional[int] = None, filter_: Optional[str] = None, fields: Optional[str] = None, sort: Optional[str] = None): id_suffix = f'/{offense_id}' if offense_id else '' params = assign_params(fields=fields) if offense_id else assign_params(filter=filter_, fields=fields, sort=sort) additional_headers = {'Range': range_} if not offense_id else None return self.http_request( method='GET', url_suffix=f'/siem/offenses{id_suffix}', params=params, additional_headers=additional_headers ) def offense_update(self, offense_id: int, protected: Optional[str] = None, follow_up: Optional[str] = None, status: Optional[str] = None, closing_reason_id: Optional[int] = None, assigned_to: Optional[str] = None, fields: Optional[str] = None): return self.http_request( method='POST', url_suffix=f'/siem/offenses/{offense_id}', params=assign_params( protected=protected, follow_up=follow_up, status=status, closing_reason_id=closing_reason_id, assigned_to=assigned_to, fields=fields ) ) def 
closing_reasons_list(self, closing_reason_id: Optional[int] = None, include_reserved: Optional[bool] = None,
                         include_deleted: Optional[bool] = None, range_: Optional[str] = None,
                         filter_: Optional[str] = None, fields: Optional[str] = None):
        """GET /siem/offense_closing_reasons, or a single reason when closing_reason_id is given.

        When an ID is given only 'fields' is forwarded; otherwise the list filters apply.
        The 'Range' pagination header is only sent on list requests.
        """
        id_suffix = f'/{closing_reason_id}' if closing_reason_id else ''
        params = assign_params(fields=fields) if closing_reason_id else assign_params(include_reserved=include_reserved,
                                                                                     include_deleted=include_deleted,
                                                                                     filter=filter_, fields=fields)
        additional_headers = {'Range': range_} if not closing_reason_id and range_ else None
        return self.http_request(
            method='GET',
            url_suffix=f'/siem/offense_closing_reasons{id_suffix}',
            additional_headers=additional_headers,
            params=params
        )

    def offense_notes_list(self, offense_id: int, range_: str, note_id: Optional[int] = None,
                           filter_: Optional[str] = None, fields: Optional[str] = None):
        """GET the notes of an offense, or a single note when note_id is given."""
        note_id_suffix = f'/{note_id}' if note_id else ''
        params = assign_params(fields=fields) if note_id else assign_params(filter=filter_, fields=fields)
        additional_headers = {'Range': range_} if not note_id else None
        return self.http_request(
            method='GET',
            url_suffix=f'/siem/offenses/{offense_id}/notes{note_id_suffix}',
            additional_headers=additional_headers,
            params=params
        )

    def offense_notes_create(self, offense_id: int, note_text: str, fields: Optional[str] = None):
        """POST a new note on the given offense."""
        return self.http_request(
            method='POST',
            url_suffix=f'/siem/offenses/{offense_id}/notes',
            params=assign_params(note_text=note_text, fields=fields)
        )

    def rules_list(self, rule_id: Optional[str] = None, range_: Optional[str] = None, filter_: Optional[str] = None,
                   fields: Optional[str] = None):
        """GET analytics rules, or a single rule when rule_id is given."""
        id_suffix = f'/{rule_id}' if rule_id else ''
        params = assign_params(fields=fields) if rule_id else assign_params(filter=filter_, fields=fields)
        additional_headers = {'Range': range_} if range_ and not rule_id else None
        return self.http_request(
            method='GET',
            url_suffix=f'/analytics/rules{id_suffix}',
            params=params,
            additional_headers=additional_headers
        )

    def rule_groups_list(self, range_: str, rule_group_id: Optional[int] = None, filter_: Optional[str] = None,
                         fields: Optional[str] = None):
        """GET analytics rule groups, or a single group when rule_group_id is given."""
        id_suffix = f'/{rule_group_id}' if rule_group_id else ''
        additional_headers = {'Range': range_} if not rule_group_id else None
        params = assign_params(fields=fields) if rule_group_id else assign_params(filter=filter_, fields=fields)
        return self.http_request(
            method='GET',
            url_suffix=f'/analytics/rule_groups{id_suffix}',
            additional_headers=additional_headers,
            params=params
        )

    def assets_list(self, range_: Optional[str] = None, filter_: Optional[str] = None, fields: Optional[str] = None):
        """GET assets from the asset model."""
        return self.http_request(
            method='GET',
            url_suffix='/asset_model/assets',
            additional_headers={'Range': range_},
            params=assign_params(filter=filter_, fields=fields)
        )

    def saved_searches_list(self, range_: str, timeout: Optional[int], saved_search_id: Optional[str] = None,
                            filter_: Optional[str] = None, fields: Optional[str] = None):
        """GET Ariel saved searches, or a single one when saved_search_id is given."""
        id_suffix = f'/{saved_search_id}' if saved_search_id else ''
        params = assign_params(fields=fields) if saved_search_id else assign_params(filter=filter_, fields=fields)
        additional_headers = {'Range': range_} if not saved_search_id else None
        return self.http_request(
            method='GET',
            url_suffix=f'/ariel/saved_searches{id_suffix}',
            additional_headers=additional_headers,
            params=params,
            timeout=timeout
        )

    def searches_list(self, range_: str, filter_: Optional[str] = None):
        """GET the list of Ariel searches."""
        return self.http_request(
            method='GET',
            url_suffix='/ariel/searches',
            additional_headers={'Range': range_},
            params=assign_params(filter=filter_)
        )

    def search_create(self, query_expression: Optional[str] = None, saved_search_id: Optional[str] = None):
        """POST a new Ariel search, from either an AQL expression or a saved-search ID."""
        return self.http_request(
            method='POST',
            url_suffix='/ariel/searches',
            params=assign_params(
                query_expression=query_expression,
                saved_search_id=saved_search_id
            )
        )

    def search_status_get(self, search_id: str):
        """GET the status of an Ariel search."""
        return self.http_request(
            method='GET',
            url_suffix=f'/ariel/searches/{search_id}',
        )

    def search_results_get(self, search_id: str, range_: Optional[str] = None):
        """GET the results of a completed Ariel search."""
        return self.http_request(
            method='GET',
            url_suffix=f'/ariel/searches/{search_id}/results',
            additional_headers={'Range': range_} if range_ else None
        )

    def reference_sets_list(self, range_: Optional[str] = None, ref_name: Optional[str] = None,
                            filter_: Optional[str] = None, fields: Optional[str] = None):
        """GET reference sets, or a single set (URL-quoted name) when ref_name is given."""
        # NOTE(review): same-quote nesting inside this f-string requires Python 3.12+ — confirm target runtime.
        name_suffix = f'/{parse.quote(ref_name, safe='')}' if ref_name else ''
        params = assign_params(fields=fields) if ref_name else assign_params(filter=filter_, fields=fields)
        additional_headers = {'Range': range_} if not ref_name else None
        return self.http_request(
            method='GET',
            url_suffix=f'/reference_data/sets{name_suffix}',
            params=params,
            additional_headers=additional_headers
        )

    def reference_set_create(self, ref_name: str, element_type: str, timeout_type: Optional[str] = None,
                             time_to_live: Optional[str] = None, fields: Optional[str] = None):
        """POST a new reference set."""
        return self.http_request(
            method='POST',
            url_suffix='/reference_data/sets',
            params=assign_params(
                name=ref_name,
                element_type=element_type,
                timeout_type=timeout_type,
                time_to_live=time_to_live,
                fields=fields
            )
        )

    def reference_set_delete(self, ref_name: str, purge_only: Optional[str] = None, fields: Optional[str] = None):
        """DELETE a reference set (optionally only purging its contents via purge_only)."""
        return self.http_request(
            method='DELETE',
            url_suffix=f'/reference_data/sets/{parse.quote(ref_name, safe='')}',
            params=assign_params(purge_only=purge_only, fields=fields)
        )

    def reference_set_value_upsert(self, ref_name: str, value: str, source: Optional[str] = None,
                                   fields: Optional[str] = None):
        """POST (add or update) a value in a reference set."""
        return self.http_request(
            method='POST',
            url_suffix=f'/reference_data/sets/{parse.quote(ref_name, safe='')}',
            params=assign_params(value=value, source=source, fields=fields)
        )

    def reference_set_value_delete(self, ref_name: str, value: str):
        """DELETE a value from a reference set."""
        # NOTE(review): 'value' is interpolated without URL-quoting, unlike ref_name — confirm intended.
        return self.http_request(
            method='DELETE',
            url_suffix=f'/reference_data/sets/{parse.quote(ref_name, safe='')}/{value}'
        )

    def domains_list(self, domain_id: Optional[int] = None, range_: Optional[str] = None,
                     filter_: Optional[str] = None, fields: Optional[str] = None):
        """GET domains, or a single domain when domain_id is given."""
        id_suffix = f'/{domain_id}' if domain_id else ''
        params = assign_params(fields=fields) if domain_id else assign_params(filter=filter_, fields=fields)
        additional_headers = {'Range': range_} if not domain_id and range_ else None
        return self.http_request(
            method='GET',
            url_suffix=f'/config/domain_management/domains{id_suffix}',
            additional_headers=additional_headers,
            params=params
        )

    def indicators_upload(self, ref_name: str, indicators: Any, fields: Optional[str] = None):
        """POST a bulk load of indicator values into a reference set."""
        headers = {
            'Content-Type': 'application/json'
        }
        if fields:
            headers['fields'] = fields
        return self.http_request(
            method='POST',
            url_suffix=f'/reference_data/sets/bulk_load/{parse.quote(ref_name, safe='')}',
            json_data=indicators,
            additional_headers=headers
        )

    def geolocations_for_ip(self, filter_: Optional[str] = None, fields: Optional[str] = None):
        """GET geolocation data for IPs selected by 'filter_'."""
        return self.http_request(
            method='GET',
            url_suffix='/services/geolocations',
            params=assign_params(filter=filter_, fields=fields)
        )

    def log_sources_list(self, qrd_encryption_algorithm: str, qrd_encryption_password: str, range_: str,
                         filter_: Optional[str] = None, fields: Optional[str] = None):
        """GET log sources; the x-qrd-encryption headers are required by QRadar for this endpoint."""
        return self.http_request(
            method='GET',
            url_suffix='/config/event_sources/log_source_management/log_sources',
            params=assign_params(filter=filter_, fields=fields),
            additional_headers={
                'x-qrd-encryption-algorithm': qrd_encryption_algorithm,
                'x-qrd-encryption-password': qrd_encryption_password,
                'Range': range_
            }
        )

    def custom_properties(self, range_: Optional[str] = None, filter_: Optional[str] = None,
                          fields: Optional[str] = None):
        """GET custom (regex) event properties."""
        return self.http_request(
            method='GET',
            url_suffix='/config/event_sources/custom_properties/regex_properties',
            params=assign_params(filter=filter_, fields=fields),
            additional_headers={'Range': range_} if range_ else None
        )

    def offense_types(self, filter_: Optional[str] = None, fields: Optional[str] = None):
        """GET offense types."""
        return self.http_request(
            method='GET',
            url_suffix='/siem/offense_types',
            params=assign_params(filter=filter_, fields=fields)
        )

    def get_addresses(self, address_suffix: str, filter_: Optional[str] = None, fields: Optional[str] = None,
                      range_: Optional[str] = None):
        """GET offense addresses; 'address_suffix' selects the endpoint (e.g. source_addresses)."""
        return self.http_request(
            method='GET',
            url_suffix=f'/siem/{address_suffix}',
            params=assign_params(filter=filter_, fields=fields),
            additional_headers={'Range': range_} if range_ else None
        )

    def test_connection(self):
        """
        Test connection with databases (should always be up)
        """
        self.http_request(method='GET', url_suffix='/ariel/databases')
        return 'ok'


''' HELPER FUNCTIONS '''


def safely_update_context_data(func: Callable):
    """Decorator for updating context data using versions.
    In case of a race condition, perform func with the new context_data and try updating again.

    Args:
        func: The function to perform with the new context data before updating.

    raise ValueError if context_data or version are not in the kwargs for the function.
    raise DemistoException if reached maximum of retries.
    """
    def wrapper(*args, **kwargs):
        context_was_set = False
        retries = 0
        max_retries = 5
        return_value = None
        while not context_was_set and retries < max_retries:
            # func must return (context_data, version, return_value).
            context_data, version, return_value = func(*args, **kwargs)
            print_debug_msg(f'Attempting to update context data after version {version} with retry {retries}')
            new_context_data, new_version = get_integration_context_with_version()
            if new_version == version:
                try:
                    set_to_integration_context_with_retries(context_data, max_retry_times=1)
                    context_was_set = True
                    print_debug_msg(f'Updated integration context after version {version} in retry {retries}.')
                except Exception as e:
                    # A concurrent writer may have bumped the version; loop and retry with fresh data.
                    if 'Max retry attempts exceeded' in str(e):
                        continue
                    else:
                        raise e
            else:
                if 'context_data' not in kwargs or 'version' not in kwargs:
                    raise ValueError('context_data and version must be in the func kwargs if '
                                     'safely_update_context_data decorator is used but were not found.')
                else:
                    # Version moved under us — re-run func against the fresh context data.
                    kwargs['context_data'] = extract_context_data(new_context_data)
                    kwargs['version'] = new_version
print_debug_msg(f'Could not update context data after version {version} due to new '
                                f'version {new_version} in retry {retries}')
            retries = retries + 1
        if retries == max_retries:
            raise DemistoException(f'Reached maximum retries, could not update context data for function {func}.')
        return return_value
    return wrapper


def add_iso_entries_to_dict(dicts: List[Dict]) -> List[Dict]:
    """ Takes list of dicts, for each dict: creates a new dict, and for each field in the output that
    is contained in 'USECS_ENTRIES', maps its value to be iso format corresponding to the value of the field.
    Args:
        dicts (List[Dict]): List of the dicts to be transformed.

    Returns:
        (List[Dict]): New dicts with iso entries for the corresponding items in 'USECS_ENTRIES'
    """
    return [{k: (get_time_parameter(v, iso_format=True) if k in USECS_ENTRIES else v)
             for k, v in dict_.items()}
            for dict_ in dicts]


def sanitize_outputs(outputs: Any, key_replace_dict: Optional[Dict] = None) -> List[Dict]:
    """
    Gets a list of all the outputs, and sanitizes outputs.
    - Removes empty elements.
    - adds ISO entries to the outputs.
    - Outputs only keys found in 'key_replace_dict', saving their names by 'key_replace_dict' values,
      if 'key_replace_dict' is not None.
    Args:
        outputs (List[Dict]): List of the outputs to be sanitized.
        key_replace_dict (Dict): Dict of the keys to transform their names.

    Returns:
        (List[Dict]): Sanitized outputs.
    """
    if not isinstance(outputs, list):
        outputs = [outputs]
    outputs = [remove_empty_elements(output) for output in outputs]
    outputs = add_iso_entries_to_dict(outputs)
    return build_final_outputs(outputs, key_replace_dict) if key_replace_dict else outputs


def get_time_parameter(arg: Union[Optional[str], Optional[int]], iso_format: bool = False, epoch_format: bool = False):
    """
    parses arg into date time object with aware time zone if 'arg' exists.
    If no time zone is given, sets timezone to UTC.
    Returns the date time object created/ISO format/epoch format.
    Args:
        arg (str): The argument to turn into aware date time.
        iso_format (bool): Whether to return date or the parsed format of the date.
        epoch_format (bool): Whether to return date or the epoch format of the date.

    Returns:
        - (None) If 'arg' is None, returns None.
        - (datetime): If 'arg' is exists and 'iso_format' and 'epoch_format' are false, returns date time.
        - (str): If 'arg' is exists and parse_format is true, returns ISO format of the date time object.
        - (int): If 'arg' is exists and epoch_format is true, returns epoch format of the date time object.
    """
    maybe_unaware_date = arg_to_datetime(arg, is_utc=True)
    if not maybe_unaware_date:
        return None
    # Localize to UTC only when the parsed date carries no timezone of its own.
    aware_time_date = maybe_unaware_date if maybe_unaware_date.tzinfo else UTC_TIMEZONE.localize(
        maybe_unaware_date)

    if iso_format:
        return aware_time_date.isoformat()
    if epoch_format:
        # Epoch in milliseconds.
        return int(aware_time_date.timestamp() * 1000)
    return aware_time_date


def build_final_outputs(outputs: List[Dict], old_new_dict: Dict) -> List[Dict]:
    """
    Receives outputs, or a single output, and a dict containing mapping of old key names to new key names.
    Returns a list of outputs containing the new names contained in old_new_dict.
    Args:
        outputs (Dict): Outputs to replace its keys.
        old_new_dict (Dict): Old key name mapped to new key name.

    Returns:
        (Dict): The dictionary with the transformed keys and their values.
    """
    return [{old_new_dict.get(k): v for k, v in output.items() if k in old_new_dict} for output in outputs]


def build_headers(first_headers: List[str], all_headers: Set[str]) -> List[str]:
    """
    Receives headers to be shown first in entry room, and concat all the headers after first headers.
    Args:
        first_headers (List[str]): First headers to be shown in the entry room.
        all_headers (Set[str]): List of all of the headers.

    Returns:
        (List[str]): List of all of the headers, where first_headers are first in the list.
""" return first_headers + list(set.difference(all_headers, first_headers)) def is_valid_ip(ip: str) -> bool: try: ip_address(ip) return True except ValueError: print_debug_msg(f'IP {ip} was found invalid.') return False def get_offense_types(client: Client, offenses: List[Dict]) -> Dict: """ Receives list of offenses, and performs API call to QRadar service to retrieve the offense type names matching the offense type IDs of the offenses. Args: client (Client): Client to perform the API request to QRadar. offenses (List[Dict]): List of all of the offenses. Returns: (Dict): Dictionary of {offense_type_id: offense_type_name} """ try: offense_types_ids = {offense.get('offense_type') for offense in offenses if offense.get('offense_type') is not None} if not offense_types_ids: return dict() offense_types = client.offense_types(filter_=f'''id in ({','.join(map(str, offense_types_ids))})''', fields='id,name') return {offense_type.get('id'): offense_type.get('name') for offense_type in offense_types} except Exception as e: demisto.error(f"Encountered an issue while getting offense type: {e}") return {} def get_offense_closing_reasons(client: Client, offenses: List[Dict]) -> Dict: """ Receives list of offenses, and performs API call to QRadar service to retrieve the closing reason names matching the closing reason IDs of the offenses. Args: client (Client): Client to perform the API request to QRadar. offenses (List[Dict]): List of all of the offenses. 
Returns: (Dict): Dictionary of {closing_reason_id: closing_reason_name} """ try: closing_reason_ids = {offense.get('closing_reason_id') for offense in offenses if offense.get('closing_reason_id') is not None} if not closing_reason_ids: return dict() closing_reasons = client.closing_reasons_list(filter_=f'''id in ({','.join(map(str, closing_reason_ids))})''', fields='id,text') return {closing_reason.get('id'): closing_reason.get('text') for closing_reason in closing_reasons} except Exception as e: demisto.error(f"Encountered an issue while getting offense closing reasons: {e}") return {} def get_domain_names(client: Client, outputs: List[Dict]) -> Dict: """ Receives list of outputs, and performs API call to QRadar service to retrieve the domain names matching the domain IDs of the outputs. Args: client (Client): Client to perform the API request to QRadar. outputs (List[Dict]): List of all of the offenses. Returns: (Dict): Dictionary of {domain_id: domain_name} """ try: domain_ids = {offense.get('domain_id') for offense in outputs if offense.get('domain_id') is not None} if not domain_ids: return dict() domains_info = client.domains_list(filter_=f'''id in ({','.join(map(str, domain_ids))})''', fields='id,name') return {domain_info.get('id'): domain_info.get('name') for domain_info in domains_info} except Exception as e: demisto.error(f"Encountered an issue while getting offense domain names: {e}") return {} def get_rules_names(client: Client, offenses: List[Dict]) -> Dict: """ Receives list of offenses, and performs API call to QRadar service to retrieve the rules names matching the rule IDs of the offenses. Args: client (Client): Client to perform the API request to QRadar. offenses (List[Dict]): List of all of the offenses. 
Returns: (Dict): Dictionary of {rule_id: rule_name} """ try: rules_ids = {rule.get('id') for offense in offenses for rule in offense.get('rules', [])} if not rules_ids: return dict() rules = client.rules_list(None, None, f'''id in ({','.join(map(str, rules_ids))})''', 'id,name') return {rule.get('id'): rule.get('name') for rule in rules} except Exception as e: demisto.error(f"Encountered an issue while getting offenses rules: {e}") return {} def get_offense_addresses(client: Client, offenses: List[Dict], is_destination_addresses: bool) -> Dict: """ Receives list of offenses, and performs API call to QRadar service to retrieve the source IP values matching the source IPs IDs of the offenses. Args: client (Client): Client to perform the API request to QRadar. offenses (List[Dict]): List of all of the offenses. is_destination_addresses(bool): Whether addresses to enrich are destination IPs (or source). Returns: (Dict): Dictionary of {source_address_id: source_address_name}. """ address_type = 'local_destination' if is_destination_addresses else 'source' address_field = f'{address_type}_ip' address_list_field = f'{address_type}_address_ids' url_suffix = f'{address_type}_addresses' def get_addresses_for_batch(b: List): try: return client.get_addresses(url_suffix, f'''id in ({','.join(map(str, b))})''', f'id,{address_field}') except Exception as e: demisto.error(f'Failed getting address barch with error: {e}') return [] addresses_ids = [address_id for offense in offenses for address_id in offense.get(address_list_field, [])] # Submit addresses in batches to avoid overloading QRadar service addresses_batches = [get_addresses_for_batch(b) for b in batch(addresses_ids[:OFF_ENRCH_LIMIT], batch_size=int(BATCH_SIZE))] return {address_data.get('id'): address_data.get(address_field) for addresses_batch in addresses_batches for address_data in addresses_batch} def create_single_asset_for_offense_enrichment(asset: Dict) -> Dict: """ Recieves one asset, and returns the expected 
asset values for enriching offense.
    Args:
        asset (Dict): Asset to enrich the offense with

    Returns:
        (Dict): The enriched asset.
    """
    # Flatten interfaces to just the fields used downstream (mac, id, ip addresses).
    interfaces = {'interfaces': [{
        'mac_address': interface.get('mac_address'),
        'id': interface.get('id'),
        'ip_addresses': [{
            'type': ip_add.get('type'),
            'value': ip_add.get('value')
        } for ip_add in interface.get('ip_addresses', [])]
    } for interface in asset.get('interfaces', [])]}
    # Lift name/value property pairs to top-level keys.
    properties = {prop.get('name'): prop.get('value') for prop in asset.get('properties', [])
                  if 'name' in prop and 'value' in prop}
    offense_without_properties = {k: v for k, v in asset.items() if k != 'properties'}
    return add_iso_entries_to_asset(dict(offense_without_properties, **properties, **interfaces))


def enrich_offense_with_assets(client: Client, offense_ips: List[str]) -> List[Dict]:
    """
    Receives list of offense's IPs, and performs API call to QRadar service to retrieve assets correlated to IPs given.
    Args:
        client (Client): Client to perform the API request to QRadar.
        offense_ips (List[str]): List of all of the offense's IPs.

    Returns:
        (List[Dict]): List of all the correlated assets.
    """
    def get_assets_for_ips_batch(b: List):
        filter_query = ' or '.join([f'interfaces contains ip_addresses contains value="{ip}"' for ip in b])
        try:
            return client.assets_list(filter_=filter_query)
        except Exception as e:
            # Best-effort: a failed batch is logged and skipped.
            demisto.error(f'Failed getting assets for filter_query: {filter_query}. {e}')
            return []

    # Invalid IPs would break the filter query syntax — drop them up front.
    offense_ips = [offense_ip for offense_ip in offense_ips if is_valid_ip(offense_ip)]
    # Submit addresses in batches to avoid overloading QRadar service
    assets = [asset for b in batch(offense_ips[:OFF_ENRCH_LIMIT], batch_size=int(BATCH_SIZE))
              for asset in get_assets_for_ips_batch(b)]

    return [create_single_asset_for_offense_enrichment(asset) for asset in assets]


def enrich_offenses_result(client: Client, offenses: Any, enrich_ip_addresses: bool,
                           enrich_assets: bool) -> List[Dict]:
    """
    Receives list of offenses, and enriches the offenses with the following:
    - Changes offense_type value from the offense type ID to the offense type name.
    - Changes closing_reason_id value from closing reason ID to the closing reason name.
    - Adds a link to the URL of each offense.
    - Adds the domain name of the domain ID for each offense.
    - Adds to each rule of the offense its name.
    - Adds enrichment to each source/destination IP ID to its address (if enrich_ip_addresses is true).
    - Adds enrichment of assets to each offense (if enrich_assets is true).
    Args:
        client (Client): Client to perform the API calls.
        offenses (Any): List of all of the offenses to enrich.
        enrich_ip_addresses (bool): Whether to enrich the offense source/destination IP addresses.
        enrich_assets (bool): Whether to enrich the offense with assets.

    Returns:
        (List[Dict]): The enriched offenses.
""" if not isinstance(offenses, list): offenses = [offenses] print_debug_msg('Enriching offenses') offense_types_id_name_dict = get_offense_types(client, offenses) closing_reasons_id_name_dict = get_offense_closing_reasons(client, offenses) domain_id_name_dict = get_domain_names(client, offenses) if DOMAIN_ENRCH_FLG.lower() == 'true' else dict() rules_id_name_dict = get_rules_names(client, offenses) if RULES_ENRCH_FLG.lower() == 'true' else dict() source_addresses_id_ip_dict = get_offense_addresses(client, offenses, False) if enrich_ip_addresses else dict() destination_addresses_id_ip_dict = get_offense_addresses(client, offenses, True) if enrich_ip_addresses else dict() def create_enriched_offense(offense: Dict) -> Dict: link_to_offense_suffix = '/console/do/sem/offensesummary?appName=Sem&pageId=OffenseSummary&summaryId' \ f'''={offense.get('id')}''' offense_type = offense.get('offense_type') closing_reason_id = offense.get('closing_reason_id') domain_id = offense.get('domain_id') basic_enriches = { 'offense_type': offense_types_id_name_dict.get(offense_type, offense_type), 'closing_reason_id': closing_reasons_id_name_dict.get(closing_reason_id, closing_reason_id), 'LinkToOffense': urljoin(client.server, link_to_offense_suffix), } domain_enrich = { 'domain_name': domain_id_name_dict.get(domain_id, domain_id) } if DOMAIN_ENRCH_FLG.lower() == 'true' and domain_id_name_dict.get(domain_id, domain_id) else dict() rules_enrich = { 'rules': [{ 'id': rule.get('id'), 'type': rule.get('type'), 'name': rules_id_name_dict.get(rule.get('id'), rule.get('id')) } for rule in offense.get('rules', [])] if RULES_ENRCH_FLG.lower() == 'true' else dict() } source_addresses_enrich = { 'source_address_ids': [source_addresses_id_ip_dict.get(source_address_id) for source_address_id in offense.get('source_address_ids', [])] } if enrich_ip_addresses else dict() destination_addresses_enrich = { 'local_destination_address_ids': [destination_addresses_id_ip_dict.get(destination_address_id) for 
destination_address_id in offense.get('local_destination_address_ids', [])] } if enrich_ip_addresses else dict() if enrich_assets: source_ips: List = source_addresses_enrich.get('source_address_ids', []) destination_ips: List = destination_addresses_enrich.get('local_destination_address_ids', []) all_ips: List = source_ips + destination_ips asset_enrich = {'assets': enrich_offense_with_assets(client, all_ips)} else: asset_enrich = dict() return dict(offense, **basic_enriches, **domain_enrich, **rules_enrich, **source_addresses_enrich, **destination_addresses_enrich, **asset_enrich) result = [create_enriched_offense(offense) for offense in offenses] print_debug_msg('Enriched offenses successfully.') return result def enrich_asset_properties(properties: List, properties_to_enrich_dict: Dict) -> Dict: """ Receives list of properties of an asset, and properties to enrich, and returns a dict containing the enrichment Args: properties (List): List of properties of an asset. properties_to_enrich_dict (Dict): Properties to be enriched. Returns: (List[Dict]) List of new assets with enrichment. """ return { properties_to_enrich_dict.get(prop.get('name')): { 'Value': prop.get('value'), 'LastUser': prop.get('last_reported_by') } for prop in properties if prop.get('name') in properties_to_enrich_dict } def add_iso_entries_to_asset(asset: Dict) -> Dict: """ Transforms epoch entries to ISO entries in an asset. Requires a special treatment, because some of the usec entries are nested. Args: asset (Dict): Asset to transform its epoch entries to ISO. Returns: (Dict): Asset transformed. 
""" def get_asset_entry(k: str, v: Any): if k == 'interfaces': return [{ k: (get_time_parameter(v, iso_format=True) if k in USECS_ENTRIES else add_iso_entries_to_dict(v) if k == 'ip_addresses' else v) for k, v in interface.items() } for interface in v] elif k == 'properties': return add_iso_entries_to_dict(v) elif k in USECS_ENTRIES: return get_time_parameter(v, iso_format=True) else: return v return {k: get_asset_entry(k, v) for k, v in asset.items()} def enrich_assets_results(client: Client, assets: Any, full_enrichment: bool) -> List[Dict]: """ Receives list of assets, and enriches each asset with 'Endpoint' entry containing the following: - IP addresses of all interfaces. - OS name. - MAC addresses of the interfaces, if full enrichment was requested. - Domain name if full enrichment was requested. - Properties enrichment. Args: client (Client): Client to perform API call to retrieve domain names corresponding to the domain IDs. assets (List[Dict]): List of assets to be enriched. full_enrichment (bool): Whether the asset should be full enriched. Returns: (List[Dict]) List of new assets with enrichment. 
""" domain_id_name_dict = get_domain_names(client, assets) if full_enrichment else dict() def enrich_single_asset(asset: Dict) -> Dict: updated_asset = add_iso_entries_to_asset(asset) interfaces = updated_asset.get('interfaces', []) properties = updated_asset.get('properties', []) domain_id = updated_asset.get('domain_id') os_name = next((prop.get('value') for prop in properties if prop.get('name') == 'Primary OS ID'), None) ip_enrichment = { 'IPAddress': [ip_add.get('value') for interface in interfaces for ip_add in interface.get('ip_addresses', []) if ip_add.get('value')] } os_enrichment = {'OS': os_name} if os_name else dict() mac_enrichment = { 'MACAddress': [interface.get('mac_address') for interface in interfaces if interface.get('mac_address')] } if full_enrichment else dict() domains_enrichment = {'Domain': domain_id_name_dict.get(domain_id, domain_id)} \ if full_enrichment and domain_id else dict() basic_properties_enrichment = enrich_asset_properties(properties, ASSET_PROPERTIES_NAME_MAP) full_properties_enrichment = enrich_asset_properties(properties, FULL_ASSET_PROPERTIES_NAMES_MAP) \ if full_enrichment else dict() enriched_asset = dict(asset, **basic_properties_enrichment, **full_properties_enrichment) return {'Asset': add_iso_entries_to_asset(enriched_asset), 'Endpoint': dict(ip_enrichment, **os_enrichment, **mac_enrichment, **domains_enrichment)} return [enrich_single_asset(asset) for asset in assets] def get_minimum_id_to_fetch(highest_offense_id: int, user_query: Optional[str]) -> int: """ Receives the highest offense ID saved from last run, and user query. Checks if user query has a limitation for a minimum ID. If such ID exists, returns the maximum between 'highest_offense_id' and the minimum ID limitation received by the user query. Args: highest_offense_id (int): Minimum ID to fetch offenses by from last run. user_query (Optional[str]): User query for QRadar service. Returns: (int): The Minimum ID to fetch offenses by. 
""" if user_query: id_query = ID_QUERY_REGEX.search(user_query) if id_query: id_query_raw = id_query.group(0) operator = '>=' if '>=' in id_query_raw else '>' # safe to int parse without catch because regex checks for number user_offense_id = int(id_query.group(0).split(operator)[1].strip()) user_lowest_offense_id = user_offense_id if operator == '>' else user_offense_id - 1 print_debug_msg(f'Found ID in user query: {user_lowest_offense_id}, last highest ID: {highest_offense_id}') return max(highest_offense_id, user_lowest_offense_id) return highest_offense_id def get_offense_enrichment(enrichment: str) -> Tuple[bool, bool]: """ Receives enrichment asked by the user, returns true or false values indicating which enrichment should be done. Args: enrichment (Optional[str]): Enrichment argument. Returns: (bool, bool): Tuple of (ip_enrich, asset_enrich). """ if enrichment == 'IPs And Assets': return True, True if enrichment == 'IPs': return True, False return False, False def print_debug_msg(msg: str): """ Prints a message to debug with QRadarMsg prefix. Args: msg (str): Message to be logged. """ demisto.debug(f'QRadarMsg - {msg}') def reset_mirroring_events_variables(mirror_options: str): """In case of change in mirror_options initialize mirror with events context data variables. 
Args: mirror_options: The current mirror options Returns: None """ ctx = extract_context_data(get_integration_context().copy()) try: print_mirror_events_stats(ctx, f"New Long Running Container - Before Mirroring Variables Reset, " f"Mirror Option {mirror_options}") except Exception as e: print_debug_msg(f'Could not print mirror_events_stats due to error: {str(e)} \n ' f'Reseting mirroring vars') mirror_options = 'needs reset to mirroring vars' if mirror_options != MIRROR_OFFENSE_AND_EVENTS: ctx[UPDATED_MIRRORED_OFFENSES_CTX_KEY] = [] ctx[MIRRORED_OFFENSES_CTX_KEY] = [] ctx[RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY] = [] print_mirror_events_stats(ctx, "New Long Running Container - After Mirroring Variables Reset") set_to_integration_context_with_retries(encode_context_data(ctx)) def is_reset_triggered(): """ Checks if reset of integration context have been made by the user. Because fetch is long running execution, user communicates with us by calling 'qradar-reset-last-run' command which sets reset flag in context. Returns: (bool): - True if reset flag was set. If 'handle_reset' is true, also resets integration context. - False if reset flag was not found in integration context. """ ctx = get_integration_context() if ctx and RESET_KEY in ctx: print_debug_msg('Reset fetch-incidents.') set_integration_context({'samples': '[]'}) return True return False def validate_long_running_params(params: Dict) -> None: """ Receives params, checks whether the required parameters for long running execution is configured. Args: params (Dict): Cortex XSOAR params. Returns: (None): If all required params are set, raises DemistoException otherwise. """ for param_field, param_display in LONG_RUNNING_REQUIRED_PARAMS.items(): if param_field not in params: raise DemistoException(f'Parameter {param_display} is required when enabling long running execution.' 
' Please set a value for it.')


''' COMMAND FUNCTIONS '''


def test_module_command(client: Client, params: Dict) -> str:
    """
    Tests API connectivity and authentication'
    Returning 'ok' indicates that the integration works like it is supposed to.
    Connection to the service is successful.
    Raises exceptions if something goes wrong.
    Args:
        client (Client): Client to perform the API calls.
        params (Dict): Demisto params.

    Returns:
        - (str): 'ok' if test passed
        - raises DemistoException if something had failed the test.
    """
    try:
        ctx = extract_context_data(get_integration_context(), include_id=True)
        print_mirror_events_stats(ctx, "Test Module")
        is_long_running = params.get('longRunning')
        if is_long_running:
            validate_long_running_params(params)
            ip_enrich, asset_enrich = get_offense_enrichment(params.get('enrichment', 'IPs And Assets'))
            # Try to retrieve the last successfully retrieved offense
            last_highest_id = max(ctx.get(LAST_FETCH_KEY, 0) - 1, 0)
            # Dry-run one fetch iteration to validate the full long-running configuration.
            get_incidents_long_running_execution(
                client=client,
                offenses_per_fetch=1,
                user_query=params.get('query', ''),
                fetch_mode=params.get('fetch_mode', ''),
                events_columns=params.get('events_columns', ''),
                events_limit=0,
                ip_enrich=ip_enrich,
                asset_enrich=asset_enrich,
                last_highest_id=last_highest_id,
                incident_type=params.get('incident_type'),
                mirror_direction=MIRROR_DIRECTION.get(params.get('mirror_options', DEFAULT_MIRRORING_DIRECTION))
            )
        else:
            # Minimal request (single offense) just to validate connectivity/credentials.
            client.offenses_list(range_="items=0-0")
        message = 'ok'
    except DemistoException as e:
        err_msg = str(e)
        if 'unauthorized to access the requested resource' in err_msg or 'No SEC header present in request' in err_msg:
            message = 'Authorization Error: make sure credentials are correct.'
        else:
            raise e
    return message


def fetch_incidents_command() -> List[Dict]:
    """
    Fetch incidents implemented, for mapping purposes only.
    Returns list of samples saved by long running execution.

    Returns:
        (List[Dict]): List of incidents samples.
    """
    ctx = get_integration_context()
    return extract_context_data(ctx).get('samples', [])


def create_search_with_retry(client: Client, fetch_mode: str, offense: Dict, event_columns: str, events_limit: int,
                             max_retries: int = EVENTS_FAILURE_LIMIT) -> Optional[Dict]:
    """
    Creates a search to retrieve events for an offense.
    Has retry mechanism, because QRadar service tends to return random errors when
    it is loaded.
    Therefore, 'max_retries' retries will be made, to try avoid such cases as much as possible.

    Args:
        client (Client): Client to perform the API calls.
        fetch_mode (str): Which enrichment mode was requested.
                          Can be 'Fetch With All Events', 'Fetch Correlation Events Only'
        offense (Dict): Offense ID to enrich with events.
        event_columns (str): Columns of the events to be extracted from query.
        events_limit (int): Maximum number of events to enrich the offense.
        max_retries (int): Number of retries.

    Returns:
        (Dict): If search was created successfully.
        None: If reset was triggered or number of retries exceeded limit.
    """
    additional_where = ''' AND LOGSOURCETYPENAME(devicetype) = 'Custom Rule Engine' ''' \
        if fetch_mode == FetchMode.correlations_events_only.value else ''
    # Decrease 1 minute from start_time to avoid the case where the minute queried of start_time equals end_time.
offense_start_time = offense['start_time'] - 60 * 1000
    offense_id = offense['id']
    query_expression = (
        f'SELECT {event_columns} FROM events WHERE INOFFENSE({offense_id}) {additional_where} limit {events_limit} '
        f'START {offense_start_time}'
    )
    print_debug_msg(f'Trying to get events for offense ID: {offense_id}, '
                    f'offense_start_time: {offense_start_time}, '
                    f'additional_where: {additional_where}, '
                    f'events_limit: {events_limit}.')
    num_of_failures = 0
    while num_of_failures <= max_retries:
        try:
            print_debug_msg(f'Creating search for offense ID: {offense_id}, '
                            f'query_expression: {query_expression}.')
            ret_value = client.search_create(query_expression=query_expression)
            print_debug_msg(f'Created search for offense ID: {offense_id}, '
                            f'offense_start_time: {offense_start_time}, '
                            f'additional_where: {additional_where}, '
                            f'events_limit: {events_limit}, '
                            f'ret_value: {ret_value}.')
            return ret_value
        except Exception:
            print_debug_msg(f'Failed to create search for offense ID: {offense_id}. '
                            f'Retry number {num_of_failures}/{max_retries}.')
            print_debug_msg(traceback.format_exc())
            num_of_failures += 1
            if num_of_failures == max_retries:
                print_debug_msg(f'Max retries for creating search for offense: {offense_id}. Returning empty.')
                break
            time.sleep(FAILURE_SLEEP)
    print_debug_msg(f'Returning empty events for offense ID: {offense_id}.')
    return None


def poll_offense_events_with_retry(client: Client, search_id: str, offense_id: int,
                                   max_retries: int = EVENTS_FAILURE_LIMIT) -> Tuple[List[Dict], str]:
    """
    Polls QRadar service for search ID given until status returned is within '{'CANCELED', 'ERROR', 'COMPLETED'}'.
    Afterwards, performs a call to retrieve the events returned by the search.
    Has retry mechanism, because QRadar service tends to return random errors when
    it is loaded.
    Therefore, 'max_retries' retries will be made, to try avoid such cases as much as possible.

    Args:
        client (Client): Client to perform the API calls.
        search_id (str): ID of the search to poll for its status.
        offense_id (int): ID of the offense to enrich with events returned by search. Used for logging purposes here.
        max_retries (int): Number of retries.

    Returns:
        (List[Dict], str): List of events returned by query. Returns empty list if number of retries exceeded limit,
                           A failure message in case an error occurred.
    """
    num_of_failures = 0
    start_time = time.time()
    failure_message = ''
    while num_of_failures <= max_retries:
        try:
            print_debug_msg(f"Getting search status for {search_id}")
            search_status_response = client.search_status_get(search_id)
            print_debug_msg(f"Got search status for {search_id}")
            query_status = search_status_response.get('status')
            # failures are relevant only when consecutive
            num_of_failures = 0
            print_debug_msg(f'Search query_status: {query_status}')
            # Possible values for query_status: {'CANCELED', 'ERROR', 'COMPLETED'}
            # Don't try to get events if CANCELLED or ERROR
            if query_status in {'CANCELED', 'ERROR'}:
                if failure_message == '':
                    failure_message = f'query_status is {query_status}'
                return [], failure_message
            elif query_status == 'COMPLETED':
                print_debug_msg(f'Getting events for offense {offense_id}')
                search_results_response = client.search_results_get(search_id)
                # NOTE(review): same-quote nesting inside this f-string requires Python 3.12+ — confirm target runtime.
                print_debug_msg(f'Http response: {search_results_response.get('http_response', 'Not specified - ok')}')
                events = search_results_response.get('events', [])
                sanitized_events = sanitize_outputs(events)
                print_debug_msg(f'Fetched {len(sanitized_events)} events for offense {offense_id}.')
                return sanitized_events, failure_message
            elapsed = time.time() - start_time
            if elapsed >= FETCH_SLEEP:  # print status debug every fetch sleep (or after)
                print_debug_msg(f'Still fetching offense {offense_id} events, search_id: {search_id}.')
                start_time = time.time()
            time.sleep(EVENTS_INTERVAL_SECS)
        except Exception as e:
            print_debug_msg(
                f'Error while fetching offense {offense_id} events, search_id: {search_id}. Error details: {str(e)} \n'
                f'{traceback.format_exc()}')
            num_of_failures += 1
            if num_of_failures < max_retries:
                time.sleep(FAILURE_SLEEP)
            else:
                failure_message = f'{repr(e)} \nSee logs for further details.'
    print_debug_msg(f'Could not fetch events for offense ID: {offense_id}, returning empty events array.')
    return [], failure_message


def enrich_offense_with_events(client: Client, offense: Dict, fetch_mode: str, events_columns: str, events_limit: int,
                               max_retries: int = MAX_FETCH_EVENT_RETIRES):
    """
    Enriches offense given with events.
    Has retry mechanism for events returned by query to QRadar. This is needed because events might not be
    indexed when performing the search, and QRadar will return less events than expected.
    Retry mechanism here meant to avoid such cases as much as possible
    Args:
        client (Client): Client to perform the API calls.
        offense (Dict): Offense to enrich with events.
        fetch_mode (str): Which enrichment mode was requested.
                          Can be 'Fetch With All Events', 'Fetch Correlation Events Only'
        events_columns (str): Columns of the events to be extracted from query.
        events_limit (int): Maximum number of events to enrich the offense.
        max_retries (int): Number of retries.

    Returns:
        (Dict): Enriched offense with events.
    """
    failure_message = ''
    events: List[dict] = []
    min_events_size = min(offense.get('event_count', 0), events_limit)
    # decreasing 1 minute from the start_time to avoid the case where the minute queried of start_time equals end_time.
for i in range(max_retries): # retry to check if we got all the event (its not an error retry), see docstring search_response = create_search_with_retry(client, fetch_mode, offense, events_columns, events_limit) if not search_response: continue offense_id = offense['id'] events, failure_message = poll_offense_events_with_retry(client, search_response['search_id'], offense_id) print_debug_msg(f"Polled events for offense ID {offense_id}") if len(events) >= min_events_size: print_debug_msg(f"Fetched {len(events)}/{min_events_size} for offense ID {offense_id}") break print_debug_msg(f'Did not fetch enough events. Expected at least {min_events_size}. Retrying to fetch events ' f'for offense ID: {offense_id}. Retry number {i}/{max_retries}') if i < max_retries - 1: time.sleep(SLEEP_FETCH_EVENT_RETIRES) print_debug_msg(f"Reached max retries for offense {offense.get("id")} with failure message {failure_message}") if failure_message == '' and len(events) < min_events_size: failure_message = 'Events were probably not indexed in QRadar at the time of the mirror.' offense = dict(offense, mirroring_events_message=failure_message) if events: offense = dict(offense, events=events) return offense def get_incidents_long_running_execution(client: Client, offenses_per_fetch: int, user_query: str, fetch_mode: str, events_columns: str, events_limit: int, ip_enrich: bool, asset_enrich: bool, last_highest_id: int, incident_type: Optional[str], mirror_direction: Optional[str]) -> Tuple[Optional[List[Dict]], Optional[int]]: """ Gets offenses from QRadar service, and transforms them to incidents in a long running execution. Args: client (Client): Client to perform the API calls. offenses_per_fetch (int): Maximum number of offenses to be fetched. user_query (str): If given, the user filters for fetching offenses from QRadar service. fetch_mode (str): Fetch mode of the offenses. 
Can be 'Fetch Without Events', 'Fetch With All Events', 'Fetch Correlation Events Only' events_columns (str): Events columns to extract by search query for each offense. Only used when fetch mode is not 'Fetch Without Events'. events_limit (int): Number of events to be fetched for each offense. Only used when fetch mode is not 'Fetch Without Events'. ip_enrich (bool): Whether to enrich offense by changing IP IDs of each offense to its IP value. asset_enrich (bool): Whether to enrich offense with assets last_highest_id (int): The highest ID of all the offenses that have been fetched from QRadar service. incident_type (Optional[str]): Incident type. mirror_direction (Optional[str]): Whether mirror in is activated or not. Returns: (List[Dict], int): List of the incidents, and the new highest ID for next fetch. (None, None): if reset was triggered """ offense_highest_id = get_minimum_id_to_fetch(last_highest_id, user_query) user_query = f' AND {user_query}' if user_query else '' filter_fetch_query = f'id>{offense_highest_id}{user_query}' print_debug_msg(f'Filter query to QRadar: {filter_fetch_query}') range_max = offenses_per_fetch - 1 if offenses_per_fetch else MAXIMUM_OFFENSES_PER_FETCH - 1 range_ = f'items=0-{range_max}' # if it fails here we can't recover, retry again later raw_offenses = client.offenses_list(range_, filter_=filter_fetch_query, sort=ASCENDING_ID_ORDER) if raw_offenses: raw_offenses_len = len(raw_offenses) print_debug_msg(f'raw_offenses size: {raw_offenses_len}') else: print_debug_msg('empty raw_offenses') new_highest_offense_id = raw_offenses[-1].get('id') if raw_offenses else offense_highest_id print_debug_msg(f'New highest ID returned from QRadar offenses: {new_highest_offense_id}') offenses = [] if fetch_mode != FetchMode.no_events.value: try: futures = [] for offense in raw_offenses: futures.append(EXECUTOR.submit( enrich_offense_with_events, client=client, offense=offense, fetch_mode=fetch_mode, events_columns=events_columns, 
events_limit=events_limit, )) offenses = [future.result(timeout=DEFAULT_EVENTS_TIMEOUT * 60) for future in futures] except concurrent.futures.TimeoutError as e: demisto.error( f"Error while enriching mirrored offenses with events: {str(e)} \n {traceback.format_exc()}") update_missing_offenses_from_raw_offenses(raw_offenses, offenses) else: offenses = raw_offenses if is_reset_triggered(): return None, None offenses_with_mirror = [ dict(offense, mirror_direction=mirror_direction, mirror_instance=demisto.integrationInstance()) for offense in offenses] if mirror_direction else offenses enriched_offenses = enrich_offenses_result(client, offenses_with_mirror, ip_enrich, asset_enrich) final_offenses = sanitize_outputs(enriched_offenses) incidents = create_incidents_from_offenses(final_offenses, incident_type) return incidents, new_highest_offense_id def update_missing_offenses_from_raw_offenses(raw_offenses: list, offenses: list): """ Populate offenses with missing offenses """ offenses_ids = {offense['id'] for offense in raw_offenses} or set() updated_offenses_ids = {offense['id'] for offense in offenses} or set() missing_ids = offenses_ids - updated_offenses_ids if missing_ids: for offense in raw_offenses: if offense['id'] in missing_ids: offenses.append(offense) def exclude_lists(original: List[dict], exclude: List[dict], key: str): """Exclude nodes of exclude list from the original list by key Args: original: The original list to exclude from exclude: The list of nodes to exclude key: The key to exclude by Returns: A list with the original nodes that were not excluded. """ exclude_keys = [excluded_node.get(key) for excluded_node in exclude] return [element.copy() for element in original if element.get(key) not in exclude_keys] def update_mirrored_events(client: Client, fetch_mode: str, events_columns: str, events_limit: int, context_data: dict, offenses_per_fetch: int) -> list: """Update mirrored offenses' events assuming a long running container. 
Args: client: Client to perform the API calls. fetch_mode: Bring correlated / not correlated events. events_columns: Events columns to extract by search query for each offense. events_limit: Number of events to be fetched for each offense. context_data: The integration's current context data. Extract the relevant offenses to update from it. offenses_per_fetch: The number of offenses to fetch. Returns: (A list of updated offenses with their events) """ offenses = context_data.get(MIRRORED_OFFENSES_CTX_KEY, []) if len(offenses) > offenses_per_fetch: offenses = offenses[:offenses_per_fetch] updated_offenses = [] try: if len(offenses) > 0: futures = [] for offense in offenses: print_debug_msg(f"Updating events in offense: {offense.get("id")}") futures.append(EXECUTOR.submit( enrich_offense_with_events, client=client, offense=offense, fetch_mode=fetch_mode, events_columns=events_columns, events_limit=events_limit, )) updated_offenses += [future.result(timeout=DEFAULT_EVENTS_TIMEOUT * 60) for future in futures] except Exception as e: print_debug_msg(f"Error while enriching mirrored offenses with events: {str(e)} \n {traceback.format_exc()}") update_missing_offenses_from_raw_offenses(offenses, updated_offenses) finally: return updated_offenses def create_incidents_from_offenses(offenses: List[Dict], incident_type: Optional[str]) -> List[Dict]: """ Transforms list of offenses given into incidents for Demisto. Args: offenses (List[Dict]): List of the offenses to transform into incidents. incident_type (Optional[str]): Incident type to be used for each incident. Returns: (List[Dict]): Incidents list. 
""" print_debug_msg(f'Creating {len(offenses)} incidents') return [{ 'name': f'''{offense.get('id')} {offense.get('description', '')}''', 'rawJSON': json.dumps(offense), 'occurred': get_time_parameter(offense.get('start_time'), iso_format=True), 'type': incident_type } for offense in offenses] def print_mirror_events_stats(context_data: dict, stage: str) -> Set[str]: """Print debug message with information about mirroring events. Args: context_data: The integration context data. stage: A prefix for the debug message. Returns: The ids of the mirrored offenses being currently processed. """ if not context_data: print_debug_msg("Not printing stats") return set() updated = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, []) waiting_for_update = context_data.get(MIRRORED_OFFENSES_CTX_KEY, []) resubmitted_ids = context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, []) last_fetch_key = context_data.get(LAST_FETCH_KEY, 'Missing') last_mirror_update = context_data.get('last_mirror_update', 0) samples = context_data.get('samples', []) sample_length = 0 if samples: sample_length = len(samples[0]) not_updated_ids = [str(offense.get('id')) for offense in waiting_for_update] stats = [(str(offense.get('id')), len(offense.get('events', []))) for offense in updated] print_debug_msg(f"Mirror Events Stats: {stage}\n Updated Offenses (id, len(events)): {stats}" f"\n Offenses ids waiting for update: {not_updated_ids}" f"\n Resubmitted offenses: {resubmitted_ids}" f"\n Last Fetch Key {last_fetch_key}, Last mirror update {last_mirror_update}, " f"sample length {sample_length}") updated_ids = [offense_id for offense_id, events_num in stats] return set(not_updated_ids + updated_ids + resubmitted_ids) @safely_update_context_data def move_updated_offenses(context_data: dict, version: Any, include_context_data: dict, updated_list: list) -> Tuple[dict, Any, Any]: """Move updated offenses from MIRRORED_OFFENSES_CTX_KEY to UPDATED_MIRRORED_OFFENSES_CTX_KEY. 
Args: context_data: The context data to update version: The version of the context data include_context_data: The context data changes to include updated_list: The list of updated offenses Returns: (The new context data, the context data version the changes were based on, The new context_data) """ new_context_data = include_context_data.copy() if updated_list: all_updated_mirrored_offenses = merge_lists( original_list=context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, []), updated_list=updated_list, key='id') not_updated_list = exclude_lists(original=context_data.get(MIRRORED_OFFENSES_CTX_KEY, []), exclude=updated_list, key="id") new_context_data.update({UPDATED_MIRRORED_OFFENSES_CTX_KEY: all_updated_mirrored_offenses, MIRRORED_OFFENSES_CTX_KEY: not_updated_list, RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY: context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, [])}) # type: ignore else: new_context_data.update( {UPDATED_MIRRORED_OFFENSES_CTX_KEY: context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, []), MIRRORED_OFFENSES_CTX_KEY: context_data.get(MIRRORED_OFFENSES_CTX_KEY, []), RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY: context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, [])}) if not new_context_data.get('samples'): new_context_data.update({'samples': context_data.get('samples')}) if not new_context_data.get('last_mirror_update'): new_context_data.update({'last_mirror_update': str(context_data.get('last_mirror_update', 0))}) return encode_context_data(new_context_data, include_id=True), version, new_context_data def perform_long_running_loop(client: Client, offenses_per_fetch: int, fetch_mode: str, mirror_options: str, user_query: str, events_columns: str, events_limit: int, ip_enrich: bool, asset_enrich: bool, incident_type: Optional[str], mirror_direction: Optional[str]): is_reset_triggered() ctx, ctx_version = get_integration_context_with_version() print_debug_msg(f'Starting fetch loop. 
Fetch mode: {fetch_mode}, Mirror option: {mirror_options}.') incidents, new_highest_id = get_incidents_long_running_execution( client=client, offenses_per_fetch=offenses_per_fetch, user_query=user_query, fetch_mode=fetch_mode, events_columns=events_columns, events_limit=events_limit, ip_enrich=ip_enrich, asset_enrich=asset_enrich, last_highest_id=int(json.loads(ctx.get(LAST_FETCH_KEY, '0'))), incident_type=incident_type, mirror_direction=mirror_direction ) orig_context_data = extract_context_data(ctx.copy(), include_id=True) context_data = {LAST_FETCH_KEY: orig_context_data.get(LAST_FETCH_KEY, 0)} updated_mirrored_offenses = None ctx = extract_context_data(ctx) if mirror_options == MIRROR_OFFENSE_AND_EVENTS: print_mirror_events_stats(ctx, "Long Running Command - Before Update") updated_mirrored_offenses = update_mirrored_events(client=client, fetch_mode=fetch_mode, events_columns=events_columns, events_limit=events_limit, context_data=ctx, offenses_per_fetch=offenses_per_fetch) if incidents and new_highest_id: incident_batch_for_sample = incidents[:SAMPLE_SIZE] if incidents else ctx.get('samples', []) if incident_batch_for_sample: print_debug_msg(f'Saving New Highest ID: {new_highest_id}') context_data.update({'samples': incident_batch_for_sample, LAST_FETCH_KEY: int(new_highest_id)}) # if incident creation fails, it'll drop the data and try again in the next iteration demisto.createIncidents(incidents) new_context_data = move_updated_offenses(context_data=ctx, version=ctx_version, include_context_data=context_data, updated_list=updated_mirrored_offenses) print_mirror_events_stats(new_context_data, "Long Running Command - After Update") def long_running_execution_command(client: Client, params: Dict): """ Long running execution of fetching incidents from QRadar service. Will continue to fetch in an infinite loop offenses from QRadar, Enriching each offense with events/IPs/assets according to the configurations given in Demisto params. 
    transforming the offenses into incidents and sending them to Demisto to save the incidents.

    Args:
        client (Client): Client to perform API calls.
        params (Dict): Demisto params.
    """
    validate_long_running_params(params)
    fetch_mode = params.get('fetch_mode', '')
    ip_enrich, asset_enrich = get_offense_enrichment(params.get('enrichment', 'IPs And Assets'))
    offenses_per_fetch = int(params.get('offenses_per_fetch'))  # type: ignore
    user_query = params.get('query', '')
    events_columns = params.get('events_columns', '')
    events_limit = int(params.get('events_limit') or DEFAULT_EVENTS_LIMIT)
    incident_type = params.get('incident_type')
    mirror_options = params.get('mirror_options', DEFAULT_MIRRORING_DIRECTION)
    mirror_direction = MIRROR_DIRECTION.get(mirror_options)

    # Reset the mirroring context variables once before entering the fetch loop.
    reset_mirroring_vars = False
    while not reset_mirroring_vars:
        try:
            reset_mirroring_events_variables(mirror_options)
            reset_mirroring_vars = True
        except Exception as e:
            # NOTE(review): despite the retry-shaped loop, 'raise e' below propagates on the
            # first failure, so the loop never actually retries — confirm intended behavior.
            # The log message misspells 'resetting'/'retrying'; left untouched (runtime string).
            print_debug_msg(
                f'Error while reseting mirroring variables, retring. Error details: {str(e)} \n'
                f'{traceback.format_exc()}')
            demisto.error('Exception when calling reset_mirroring_events_variables')
            raise e

    # Main long-running loop: fetch, report health, sleep, repeat forever.
    while True:
        try:
            perform_long_running_loop(
                client=client,
                offenses_per_fetch=offenses_per_fetch,
                fetch_mode=fetch_mode,
                mirror_options=mirror_options,
                user_query=user_query,
                events_columns=events_columns,
                events_limit=events_limit,
                ip_enrich=ip_enrich,
                asset_enrich=asset_enrich,
                incident_type=incident_type,
                mirror_direction=mirror_direction
            )
            # Empty string clears any previous error from the module health status.
            demisto.updateModuleHealth('')

        except Exception as e:
            msg = f'Error occurred during long running loop: {e}'
            demisto.updateModuleHealth(msg)
            demisto.error(msg)
            demisto.error(traceback.format_exc())

        finally:
            # Throttle the loop regardless of success or failure.
            print_debug_msg('Finished fetch loop')
            time.sleep(FETCH_SLEEP)


def qradar_offenses_list_command(client: Client, args: Dict) -> CommandResults:
    """
    Retrieves list of offenses from QRadar service.
    possible arguments:
    - offense_id: Retrieves details of the specific offense that corresponds to the ID given.
    - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3).
    - filter: Query filter to filter results returned by QRadar service. see
              https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html
              for more details.
    - fields: If used, will filter all fields except for the specified ones.
              Use this parameter to specify which fields you would like to get back in the
              response. Fields that are not explicitly named are excluded.
    Args:
        client (Client): QRadar client to perform the API call.
        args (Dict): Demisto args.

    Returns:
        CommandResults.
    """
    offense_id = args.get('offense_id')
    range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}'''
    filter_ = args.get('filter')
    fields = args.get('fields')
    ip_enrich, asset_enrich = get_offense_enrichment(args.get('enrichment', 'None'))

    # if this call fails, raise an error and stop command execution
    response = client.offenses_list(range_, offense_id, filter_, fields)
    enriched_outputs = enrich_offenses_result(client, response, ip_enrich, asset_enrich)
    final_outputs = sanitize_outputs(enriched_outputs, OFFENSE_OLD_NEW_NAMES_MAP)
    headers = build_headers(['ID', 'Description', 'OffenseType', 'Status', 'Severity'],
                            set(OFFENSE_OLD_NEW_NAMES_MAP.values()))

    return CommandResults(
        readable_output=tableToMarkdown('Offenses List', final_outputs, headers=headers, removeNull=True),
        outputs_prefix='QRadar.Offense',
        outputs_key_field='ID',
        outputs=final_outputs,
        raw_response=response
    )


def qradar_offense_update_command(client: Client, args: Dict) -> CommandResults:
    """
    Updates offense that corresponds to the given offense ID.
    possible arguments:
    - offense_id (Required): Update offense that corresponds to ID given.
    - protected: Whether the offense is protected.
    - follow_up: Whether the offense should be marked for follow up.
    - status: Status of the offense. One of 'OPEN', 'HIDDEN', 'CLOSED'.
    - closing_reason_id: The ID of the reason the offense was closed. full list of closing reason IDs,
                         full list of closing reason IDs can be retrieved by 'qradar-closing-reasons' command.
    - assigned_to: The user whom to assign the offense to.
    - fields: If used, will filter all fields except for the specified ones.
              Use this parameter to specify which fields you would like to get back in the
              response. Fields that are not explicitly named are excluded.
    Args:
        client (Client): QRadar client to perform the API call.
        args (Dict): Demisto args.

    Returns:
        CommandResults.
    """
    offense_id: int = int(args['offense_id'])
    protected = args.get('protected')
    follow_up = args.get('follow_up')
    closing_reason_name = args.get('closing_reason_name')

    status = args.get('status')
    closing_reason_id = args.get('closing_reason_id')
    # Closing an offense requires a closing reason, given either by ID or by name.
    if status == 'CLOSED' and (not closing_reason_id and not closing_reason_name):
        raise DemistoException(
            '''Closing reason ID must be provided when closing an offense. Available closing reasons can be achieved
             by 'qradar-closing-reasons' command.'''
        )

    if closing_reason_name:
        # Resolve the closing reason name to its ID via the full closing-reasons list.
        # if this call fails, raise an error and stop command execution
        closing_reasons_list = client.closing_reasons_list(include_deleted=True, include_reserved=True)
        for closing_reason in closing_reasons_list:
            if closing_reason.get('text') == closing_reason_name:
                closing_reason_id = closing_reason.get('id')
        if not closing_reason_id:
            raise DemistoException(f'Could not find closing reason name {closing_reason_name}. Please provide a valid'
                                   ' closing reason name. Closing reasons can be retrieved by running the '
                                   'qradar-closing-reasons command.')

    assigned_to = args.get('assigned_to')
    fields = args.get('fields')
    ip_enrich, asset_enrich = get_offense_enrichment(args.get('enrichment', 'None'))

    # if this call fails, raise an error and stop command execution
    response = client.offense_update(offense_id, protected, follow_up, status, closing_reason_id, assigned_to,
                                     fields)

    enriched_outputs = enrich_offenses_result(client, response, ip_enrich, asset_enrich)
    final_outputs = sanitize_outputs(enriched_outputs, OFFENSE_OLD_NEW_NAMES_MAP)
    headers = build_headers(['ID', 'Description', 'OffenseType', 'Status', 'Severity'],
                            set(OFFENSE_OLD_NEW_NAMES_MAP.values()))

    return CommandResults(
        readable_output=tableToMarkdown('offense Update', final_outputs, headers, removeNull=True),
        outputs_prefix='QRadar.Offense',
        outputs_key_field='ID',
        outputs=final_outputs,
        raw_response=response
    )


def qradar_closing_reasons_list_command(client: Client, args: Dict) -> CommandResults:
    """
    Retrieves list of closing reasons from QRadar service.
    possible arguments:
    - closing_reason_id: Retrieves details of the specific closing reason that corresponds to the ID given.
    - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3).
    - filter: Query filter to filter results returned by QRadar service. see
              https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html
              for more details.
    - fields: If used, will filter all fields except for the specified ones.
              Use this parameter to specify which fields you would like to get back in the
              response. Fields that are not explicitly named are excluded.
    Args:
        client (Client): QRadar client to perform the API call.
        args (Dict): Demisto args.

    Returns:
        CommandResults.
""" closing_reason_id = args.get('closing_reason_id') include_reserved = argToBoolean(args.get('include_reserved', False)) include_deleted = argToBoolean(args.get('include_deleted', False)) range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' filter_ = args.get('filter') fields = args.get('fields') # if this call fails, raise an error and stop command execution response = client.closing_reasons_list(closing_reason_id, include_reserved, include_deleted, range_, filter_, fields) outputs = sanitize_outputs(response, CLOSING_REASONS_OLD_NEW_MAP) headers = build_headers(['ID', 'Name'], set(CLOSING_REASONS_OLD_NEW_MAP.values())) return CommandResults( readable_output=tableToMarkdown('Closing Reasons', outputs, headers=headers, removeNull=True), outputs_prefix='QRadar.Offense.ClosingReasons', outputs_key_field='ID', outputs=outputs, raw_response=response ) def qradar_offense_notes_list_command(client: Client, args: Dict) -> CommandResults: """ Retrieves list of notes corresponding to the ID of the offense ID given from QRadar service. possible arguments: - offense_id: The offense ID to retrieve the notes for. - note_id: The note ID to its details. - range: Range of notes to return for the offense corresponding to the offense ID (e.g.: 0-20, 3-5, 3-3). - filter: Query filter to filter results returned by QRadar service. see https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html for more details. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. 
""" offense_id: int = int(args['offense_id']) note_id = args.get('note_id') range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' filter_ = args.get('filter') fields = args.get('fields') # if this call fails, raise an error and stop command execution response = client.offense_notes_list(offense_id, range_, note_id, filter_, fields) outputs = sanitize_outputs(response, NOTES_OLD_NEW_MAP) headers = build_headers(['ID', 'Text', 'CreatedBy', 'CreateTime'], set(NOTES_OLD_NEW_MAP.values())) return CommandResults( readable_output=tableToMarkdown(f'Offense Notes List For Offense ID {offense_id}', outputs, headers, removeNull=True), outputs_prefix='QRadar.Note', outputs_key_field='ID', outputs=outputs, raw_response=response ) def qradar_offense_notes_create_command(client: Client, args: Dict) -> CommandResults: """ Create a new note for the offense corresponding to the given offense ID with the note text given to QRadar service. possible arguments: - offense_id: The offense ID to add note to. - note_text: The note text. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. 
""" offense_id: int = int(args['offense_id']) note_text: str = args.get('note_text', '') fields = args.get('fields') # if this call fails, raise an error and stop command execution response = client.offense_notes_create(offense_id, note_text, fields) outputs = sanitize_outputs(response, NOTES_OLD_NEW_MAP) headers = build_headers(['ID', 'Text', 'CreatedBy', 'CreateTime'], set(NOTES_OLD_NEW_MAP.values())) return CommandResults( readable_output=tableToMarkdown('Create Note', outputs, headers, removeNull=True), outputs_prefix='QRadar.Note', outputs_key_field='ID', outputs=outputs, raw_response=response ) def qradar_rules_list_command(client: Client, args: Dict) -> CommandResults: """ Retrieves list of rules from QRadar service. possible arguments: - rule_id: Retrieves details of the specific rule that corresponds to the ID given. - rule_type: Retrieves rules corresponding to the given rule type. - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3). - filter: Query filter to filter results returned by QRadar service. see https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html for more details. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. 
""" rule_id = args.get('rule_id') rule_type = args.get('rule_type') range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' filter_ = args.get('filter') fields = args.get('fields') if not filter_ and rule_type: filter_ = f'type={rule_type}' # if this call fails, raise an error and stop command execution response = client.rules_list(rule_id, range_, filter_, fields) outputs = sanitize_outputs(response, RULES_OLD_NEW_MAP) headers = build_headers(['ID', 'Name', 'Type'], set(RULES_OLD_NEW_MAP.values())) return CommandResults( readable_output=tableToMarkdown('Rules List', outputs, headers=headers, removeNull=True), outputs_prefix='QRadar.Rule', outputs_key_field='ID', outputs=outputs, raw_response=response ) def qradar_rule_groups_list_command(client: Client, args: Dict) -> CommandResults: """ Retrieves list of rule groups from QRadar service. possible arguments: - rule_group_id: Retrieves details of the specific rule group that corresponds to the ID given. - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3). - filter: Query filter to filter results returned by QRadar service. see https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html for more details. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. 
""" rule_group_id = arg_to_number(args.get('rule_group_id')) range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' filter_ = args.get('filter') fields = args.get('fields') # if this call fails, raise an error and stop command execution response = client.rule_groups_list(range_, rule_group_id, filter_, fields) outputs = sanitize_outputs(response, RULES_GROUP_OLD_NEW_MAP) headers = build_headers(['ID', 'Name', 'Description', 'Owner'], set(RULES_GROUP_OLD_NEW_MAP.values())) return CommandResults( readable_output=tableToMarkdown('Rules Group List', outputs, headers, removeNull=True), outputs_prefix='QRadar.RuleGroup', outputs_key_field='ID', outputs=outputs, raw_response=response ) def qradar_assets_list_command(client: Client, args: Dict) -> CommandResults: """ Retrieves list of assets from QRadar service. possible arguments: - asset_id: Retrieves details of the specific asset that corresponds to the ID given. - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3). - filter: Query filter to filter results returned by QRadar service. see https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html for more details. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. """ asset_id = args.get('asset_id') range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' filter_ = args.get('filter') fields = args.get('fields') # If asset ID was given, override filter if both filter and asset ID were given. 
if asset_id: filter_ = f'id={asset_id}' full_enrichment = True if asset_id else False # if this call fails, raise an error and stop command execution response = client.assets_list(range_, filter_, fields) enriched_outputs = enrich_assets_results(client, response, full_enrichment) assets_results = dict() assets_hr = [] endpoints = [] for output in enriched_outputs: output['Asset']['hostnames'] = add_iso_entries_to_dict(output.get('Asset', dict()).get('hostnames', [])) output['Asset']['users'] = add_iso_entries_to_dict(output.get('Asset', dict()).get('users', [])) output['Asset']['products'] = add_iso_entries_to_dict(output.get('Asset', dict()).get('products', [])) output['Asset'] = sanitize_outputs(output.get('Asset'), ASSET_OLD_NEW_MAP)[0] assets_hr.append(output['Asset']) assets_results[f'''QRadar.Asset(val.ID === "{output['Asset']['ID']}")'''] = output['Asset'] sanitized_endpoint = remove_empty_elements(output.get('Endpoint', dict())) if sanitized_endpoint: endpoints.append(sanitized_endpoint) asset_human_readable = tableToMarkdown('Assets List', assets_hr, removeNull=True) endpoints_human_readable = tableToMarkdown('Endpoints', endpoints, removeNull=True) if endpoints: assets_results['Endpoint'] = endpoints return CommandResults( readable_output=asset_human_readable + endpoints_human_readable, outputs=assets_results, raw_response=response ) def qradar_saved_searches_list_command(client: Client, args: Dict) -> CommandResults: """ Retrieves list of saved searches from QRadar service. possible arguments: - saved_search_id: Retrieves details of the specific saved search that corresponds to the ID given. - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3). - filter: Query filter to filter results returned by QRadar service. see https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html for more details. - fields: If used, will filter all fields except for the specified ones. 
Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. """ saved_search_id = args.get('saved_search_id') timeout: Optional[int] = arg_to_number(args.get('timeout', DEFAULT_TIMEOUT_VALUE)) range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' filter_ = args.get('filter') fields = args.get('fields') # if this call fails, raise an error and stop command execution response = client.saved_searches_list(range_, timeout, saved_search_id, filter_, fields) outputs = sanitize_outputs(response, SAVED_SEARCH_OLD_NEW_MAP) headers = build_headers(['ID', 'Name', 'Description'], set(SAVED_SEARCH_OLD_NEW_MAP.values())) return CommandResults( readable_output=tableToMarkdown('Saved Searches List', outputs, headers, removeNull=True), outputs_prefix='QRadar.SavedSearch', outputs_key_field='ID', outputs=outputs, raw_response=response ) def qradar_searches_list_command(client: Client, args: Dict) -> CommandResults: """ Retrieves list of searches IDs from QRadar service. possible arguments: - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3). - filter: Query filter to filter results returned by QRadar service. see https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html for more details. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. 
""" range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' filter_ = args.get('filter') # if this call fails, raise an error and stop command execution response = client.searches_list(range_, filter_) outputs = [{'SearchID': search_id} for search_id in response] return CommandResults( readable_output=tableToMarkdown('Search ID List', outputs), outputs_prefix='QRadar.SearchID', outputs_key_field='SearchID', outputs=outputs, raw_response=response ) def qradar_search_create_command(client: Client, args: Dict) -> CommandResults: """ Create a search in QRadar service. possible arguments: - query_expression: The AQL query to execute. Mutually exclusive with saved_search_id. - saved_search_id: Saved search ID to execute. Mutually exclusive with query_expression. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. """ query_expression = args.get('query_expression') saved_search_id = args.get('saved_search_id') # if this call fails, raise an error and stop command execution response = client.search_create(query_expression, saved_search_id) outputs = sanitize_outputs(response, SEARCH_OLD_NEW_MAP) return CommandResults( readable_output=tableToMarkdown('Create Search', outputs), outputs_prefix='QRadar.Search', outputs_key_field='ID', outputs=outputs, raw_response=response ) def qradar_search_status_get_command(client: Client, args: Dict) -> CommandResults: """ Retrieves search status from QRadar service. possible arguments: - search_id (Required): The search ID to retrieve its status. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. 
""" search_id: str = args.get('search_id', '') # if this call fails, raise an error and stop command execution response = client.search_status_get(search_id) outputs = sanitize_outputs(response, SEARCH_OLD_NEW_MAP) return CommandResults( readable_output=tableToMarkdown(f'Search Status For Search ID {search_id}', outputs), outputs_prefix='QRadar.Search', outputs_key_field='ID', outputs=outputs, raw_response=response ) def qradar_search_results_get_command(client: Client, args: Dict) -> CommandResults: """ Retrieves search results from QRadar service. possible arguments: - search_id: Search ID to retrieve its results. - output_path: If specified, will be context output path prefix. - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3). Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. """ search_id: str = args.get('search_id', '') output_path = args.get('output_path') # Using or instead of default value for QRadarFullSearch backward compatibility range_ = f'''items={args.get('range') or DEFAULT_RANGE_VALUE}''' # if this call fails, raise an error and stop command execution response = client.search_results_get(search_id, range_) if not response: raise DemistoException('Unexpected response from QRadar service.') result_key = list(response.keys())[0] outputs = sanitize_outputs(response.get(result_key)) outputs_prefix = output_path if output_path else f'QRadar.Search(val.ID === "{search_id}").Result.{result_key}' return CommandResults( readable_output=tableToMarkdown(f'Search Results For Search ID {search_id}', outputs), outputs_prefix=outputs_prefix, outputs=outputs, raw_response=response ) def qradar_reference_sets_list_command(client: Client, args: Dict) -> CommandResults: """ Retrieves list of reference sets from QRadar service. possible arguments: - ref_name: Retrieves details of the specific reference that corresponds to the reference name given. 
- range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3). - filter: Query filter to filter results returned by QRadar service. see https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html for more details. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. """ ref_name = args.get('ref_name') convert_date_value = argToBoolean(args.get('date_value', False)) range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' filter_ = args.get('filter') fields = args.get('fields') # if this call fails, raise an error and stop command execution response = client.reference_sets_list(range_, ref_name, filter_, fields) if ref_name: outputs = dict(response) if convert_date_value and outputs.get('element_type') == 'DATE': for data_entry in outputs.get('data', []): data_entry['value'] = get_time_parameter(data_entry.get('value'), iso_format=True) outputs['data'] = sanitize_outputs(outputs.get('data', []), REFERENCE_SET_DATA_OLD_NEW_MAP) else: outputs = response final_outputs = sanitize_outputs(outputs, REFERENCE_SETS_OLD_NEW_MAP) headers = build_headers(['Name', 'ElementType', 'Data', 'TimeToLive', 'TimeoutType'], set(REFERENCE_SETS_OLD_NEW_MAP.values())) return CommandResults( readable_output=tableToMarkdown('Reference Sets List', final_outputs, headers, removeNull=True), outputs_prefix='QRadar.Reference', outputs_key_field='Name', outputs=final_outputs, raw_response=response ) def qradar_reference_set_create_command(client: Client, args: Dict) -> CommandResults: """ Create a new reference set. possible arguments: - ref_name (Required): The name of the new reference set. - element_type (Required): The type of the new reference set. 
Can be ALN (alphanumeric), ALNIC (alphanumeric ignore case), IP (IP address), NUM (numeric), PORT (port number) or DATE. - timeout_type: Indicates if the time_to_live interval is based on when the data was first seen or last seen. The allowed values are 'FIRST_SEEN', 'LAST_SEEN' and 'UNKNOWN'. The default value is 'UNKNOWN'. - time_to_live: The time to live interval, for example: '1 month' or '5 minutes'. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. """ ref_name: str = args.get('ref_name', '') element_type: str = args.get('element_type', '') timeout_type = args.get('timeout_type') time_to_live = args.get('time_to_live') fields = args.get('fields') # if this call fails, raise an error and stop command execution response = client.reference_set_create(ref_name, element_type, timeout_type, time_to_live, fields) outputs = sanitize_outputs(response, REFERENCE_SETS_OLD_NEW_MAP) headers = build_headers(['Name', 'ElementType', 'Data', 'TimeToLive', 'TimeoutType'], set(REFERENCE_SETS_OLD_NEW_MAP.values())) return CommandResults( readable_output=tableToMarkdown('Reference Set Create', outputs, headers, removeNull=True), outputs_prefix='QRadar.Reference', outputs_key_field='Name', outputs=outputs, raw_response=response ) def qradar_reference_set_delete_command(client: Client, args: Dict) -> CommandResults: """ Removes a reference set or purges its contents. possible arguments: - ref_name (Required): The name of the new reference set. - purge_only: Indicates if the reference set should have its contents purged (true), keeping the reference set structure. If the value is 'false', or not specified the reference set is removed completely. Default is 'false'. 
- fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. """ ref_name: str = args.get('ref_name', '') purge_only = args.get('purge_only') fields = args.get('fields') # if this call fails, raise an error and stop command execution response = client.reference_set_delete(ref_name, purge_only, fields) return CommandResults( raw_response=response, readable_output=f'Request to delete reference {ref_name} was submitted.' f''' Current deletion status: {response.get('status', 'Unknown')}''') def qradar_reference_set_value_upsert_command(client: Client, args: Dict) -> CommandResults: """ Update or insert new value to a reference set from QRadar service. possible arguments: - ref_name (Required): The reference name to insert/update a value for. - values (Required): Comma separated list. All the values to be inserted/updated. - source: An indication of where the data originated. Default is reference data api. - date_value: Boolean, specifies if values given are dates or not. - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3). - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. 
""" ref_name: str = args.get('ref_name', '') values: List[str] = argToList(args.get('value', '')) if not values: raise DemistoException('Value to insert must be given.') source = args.get('source') date_value = argToBoolean(args.get('date_value', False)) fields = args.get('fields') if date_value: values = [get_time_parameter(value, epoch_format=True) for value in values] # if one of these calls fail, raise an error and stop command execution if len(values) == 1: response = client.reference_set_value_upsert(ref_name, values[0], source, fields) else: response = client.indicators_upload(ref_name, values, fields) outputs = sanitize_outputs(response, REFERENCE_SETS_OLD_NEW_MAP) return CommandResults( readable_output=tableToMarkdown('Reference Update Create', outputs, ['Name', 'ElementType', 'TimeToLive', 'TimeoutType', 'NumberOfElements', 'CreationTime'], removeNull=True), outputs_prefix='QRadar.Reference', outputs_key_field='Name', outputs=outputs, raw_response=response ) def qradar_reference_set_value_delete_command(client: Client, args: Dict) -> CommandResults: """ Delete a value in reference set from QRadar service. possible arguments: - ref_name (Required): The reference name to insert/update a value for. - value (Required): Value to be deleted. - date_value: Boolean, specifies if values given are dates or not. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. 
""" ref_name: str = args.get('ref_name', '') value: str = args.get('value', '') date_value = argToBoolean(args.get('date_value', False)) original_value = value if date_value: value = get_time_parameter(original_value, epoch_format=True) # if this call fails, raise an error and stop command execution response = client.reference_set_value_delete(ref_name, value) human_readable = f'### value: {original_value} of reference: {ref_name} was deleted successfully' return CommandResults( readable_output=human_readable, raw_response=response ) def qradar_domains_list_command(client: Client, args: Dict) -> CommandResults: """ Retrieves list of domains sets from QRadar service. If you do not have the System Administrator or Security Administrator permissions, then for each domain assigned to your security profile you can only view the values for the id and name fields. All other values return null. possible arguments: - domain_id: Retrieves details of the specific domain that corresponds to the ID given. - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3). - filter: Query filter to filter results returned by QRadar service. see https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html for more details. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. """ # backward compatibility for domain_id argument named is 'id' in QRadar v2. 
domain_id = args.get('domain_id') or args.get('id') range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' filter_ = args.get('filter') fields = args.get('fields') # if this call fails, raise an error and stop command execution response = client.domains_list(domain_id, range_, filter_, fields) outputs = sanitize_outputs(response, DOMAIN_OLD_NEW_MAP) return CommandResults( readable_output=tableToMarkdown('Domains List', outputs, removeNull=True), outputs_prefix='QRadar.Domains', outputs_key_field='ID', outputs=outputs, raw_response=response ) def qradar_indicators_upload_command(client: Client, args: Dict) -> CommandResults: """ Uploads list of indicators from Demisto to a reference set in QRadar service. possible arguments: - ref_name (Required): Name of the reference set to upload indicators to. - query: The query for getting indicators from Demisto. - limit: Maximum number of indicators to fetch from Demisto. - page: The page from which to get the indicators. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. """ ref_name: str = args.get('ref_name', '') query = args.get('query') limit = arg_to_number(args.get('limit', DEFAULT_LIMIT_VALUE)) page = arg_to_number(args.get('page', 0)) fields = args.get('fields') # Backward compatibility for QRadar V2 command. Create reference set for given 'ref_name' if does not exist. 
element_type = args.get('element_type', '') timeout_type = args.get('timeout_type') time_to_live = args.get('time_to_live') try: client.reference_sets_list(ref_name=ref_name) except DemistoException as e: # Create reference set if does not exist if e.message and f'{ref_name} does not exist' in e.message: # if this call fails, raise an error and stop command execution client.reference_set_create(ref_name, element_type, timeout_type, time_to_live) else: raise e search_indicators = IndicatorsSearcher(page=page) indicators = search_indicators.search_indicators_by_version(query=query, size=limit).get('iocs', []) indicators_data = [{'Indicator Value': indicator.get('value'), 'Indicator Type': indicator.get('indicator_type')} for indicator in indicators if 'value' in indicator and 'indicator_type' in indicator] indicator_values: List[Any] = [indicator.get('Indicator Value') for indicator in indicators_data] if not indicators_data: return CommandResults( readable_output=f'No indicators were found for reference set {ref_name}' ) # if this call fails, raise an error and stop command execution response = client.indicators_upload(ref_name, indicator_values, fields) outputs = sanitize_outputs(response) reference_set_hr = tableToMarkdown(f'Indicators Upload For Reference Set {ref_name}', outputs) indicators_uploaded_hr = tableToMarkdown('Indicators Uploaded', indicators_data) return CommandResults( readable_output=f'{reference_set_hr}\n{indicators_uploaded_hr}', outputs_prefix='QRadar.Reference', outputs_key_field='name', outputs=outputs, raw_response=response ) def flatten_nested_geolocation_values(geolocation_dict: Dict, dict_key: str, nested_value_keys: List[str]) -> Dict: """ Receives output from geolocation IPs command, and does: 1) flattens output, takes nested keys values. 2) Converts keys to prefix of 'dict_key' and suffix of nested key as camel case. Args: geolocation_dict (Dict): The dict to flatten. dict_key (Dict): The key of the inner dict to use his values. 
nested_value_keys (Dict): The keys inside inner dict to take. Returns: (Dict): dict of ({dict_key_name}{camel case nested key}: {nested key value} """ return {f'{camelize_string(dict_key)}{camelize_string(k)}': geolocation_dict.get(dict_key, dict()).get(k) for k in nested_value_keys} def qradar_geolocations_for_ip_command(client: Client, args: Dict) -> CommandResults: """ Retrieves the MaxMind geoip data for the given IP addresses. possible arguments: - ip (Required): Comma separated list. the IPs to retrieve data for. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. """ ips = argToList(args.get('ip')) filter_ = f'''ip_address IN ({','.join(map(lambda ip: f''{str(ip)}'', ips))})''' fields = args.get('fields') # if this call fails, raise an error and stop command execution response = client.geolocations_for_ip(filter_, fields) outputs = [] for output in response: city_values = flatten_nested_geolocation_values(output, 'city', ['name']) continent_values = flatten_nested_geolocation_values(output, 'continent', ['name']) location_values = flatten_nested_geolocation_values(output, 'location', ['accuracy_radius', 'average_income', 'latitude', 'longitude', 'metro_code', 'population_density', 'timezone']) physical_country_values = flatten_nested_geolocation_values(output, 'physical_country', ['iso_code', 'name']) registered_country_values = flatten_nested_geolocation_values(output, 'registered_country', ['iso_code', 'name']) represented_country_values = flatten_nested_geolocation_values(output, 'represented_country', ['iso_code', 'name', 'confidence']) subdivision_values = flatten_nested_geolocation_values(output, 'subdivision', ['name', 'iso_code', 'confidence']) non_nested_values = { 'IPAddress': 
output.get('ip_address'), 'Traits': output.get('traits'), 'Coordinates': output.get('geo_json', dict()).get('coordinates'), 'PostalCode': output.get('postal', dict()).get('postal_code'), 'PostalCodeConfidence': output.get('postal', dict()).get('confidence') } final_output = dict(city_values, **continent_values, **location_values, **physical_country_values, **registered_country_values, **represented_country_values, **subdivision_values, **non_nested_values) outputs.append(final_output) final_outputs = sanitize_outputs(outputs) return CommandResults( readable_output=tableToMarkdown('Geolocation For IP', final_outputs), outputs_prefix='QRadar.GeoForIP', outputs_key_field='IPAddress', outputs=final_outputs, raw_response=response ) def qradar_log_sources_list_command(client: Client, args: Dict) -> CommandResults: """ Retrieves a list of log sources from QRadar service. possible arguments: - qrd_encryption_algorithm: The algorithm to use for encrypting the sensitive data of this endpoint. Using AES 128 - qrd_encryption_password: The password to use for encrypting the sensitive data of this endpoint. If argument was not given, will be randomly generated. - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3). - filter: Query filter to filter results returned by QRadar service. see https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html for more details. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. 
""" qrd_encryption_algorithm: str = args.get('qrd_encryption_algorithm', 'AES128') qrd_encryption_password: str = args.get('qrd_encryption_password', secrets.token_urlsafe(20)) range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' filter_ = args.get('filter') fields = args.get('fields') # if this call fails, raise an error and stop command execution response = client.log_sources_list(qrd_encryption_algorithm, qrd_encryption_password, range_, filter_, fields) outputs = sanitize_outputs(response, LOG_SOURCES_OLD_NEW_MAP) headers = build_headers(['ID', 'Name', 'Description'], set(LOG_SOURCES_OLD_NEW_MAP.values())) return CommandResults( readable_output=tableToMarkdown('Log Sources List', outputs, headers, removeNull=True), outputs_prefix='QRadar.LogSource', outputs_key_field='ID', outputs=outputs, raw_response=response ) def qradar_get_custom_properties_command(client: Client, args: Dict) -> CommandResults: """ Retrieves a list of event regex properties from QRadar service. possible arguments: - field_names: A comma-separated list of names of an exact properties to search for. - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3). - filter: Query filter to filter results returned by QRadar service. see https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html for more details. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. 
""" limit = arg_to_number(args.get('limit', DEFAULT_LIMIT_VALUE)) if limit: range_ = f'items=0-{limit - 1}' else: range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' like_names = argToList(args.get('like_name')) field_names = argToList(args.get('field_name')) filter_ = args.get('filter', '') fields = args.get('fields') if not filter_: if field_names: filter_ += f'''name IN ({','.join(map(lambda name: f''{str(name)}'', field_names))})''' if like_names: filter_ += ' or '.join(map(lambda like: f' name ILIKE "%{like}%"', like_names)) # if this call fails, raise an error and stop command execution response = client.custom_properties(range_, filter_, fields) outputs = sanitize_outputs(response) return CommandResults( readable_output=tableToMarkdown('Custom Properties', outputs, removeNull=True), outputs_prefix='QRadar.Properties', outputs_key_field='identifier', outputs=outputs, raw_response=response ) def perform_ips_command_request(client: Client, args: Dict[str, Any], is_destination_addresses: bool): """ Performs request to QRadar IPs endpoint. Args: client (Client): Client to perform the request to QRadar service. args (Dict[str, Any]): XSOAR arguments. is_destination_addresses (bool): Whether request is for destination addresses or source addresses. Returns: - Request response. """ range_: str = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' filter_: Optional[str] = args.get('filter') fields: Optional[str] = args.get('fields') address_type = 'local_destination' if is_destination_addresses else 'source' ips_arg_name: str = f'{address_type}_ip' ips: List[str] = argToList(args.get(ips_arg_name, [])) if ips and filter_: raise DemistoException(f'Both filter and {ips_arg_name} have been supplied. 
Please supply only one.') if ips: filter_ = ' OR '.join([f'{ips_arg_name}="{ip_}"' for ip_ in ips]) url_suffix = f'{address_type}_addresses' # if this call fails, raise an error and stop command execution response = client.get_addresses(url_suffix, filter_, fields, range_) return response def qradar_ips_source_get_command(client: Client, args: Dict[str, Any]) -> CommandResults: """ Get source IPS from QRadar service. Args: client (Client): Client to perform API calls to QRadar service. args (Dict[str, Any): XSOAR arguments. Returns: (CommandResults). """ response = perform_ips_command_request(client, args, is_destination_addresses=False) outputs = sanitize_outputs(response, SOURCE_IPS_OLD_NEW_MAP) return CommandResults( readable_output=tableToMarkdown('Source IPs', outputs), outputs_prefix='QRadar.SourceIP', outputs_key_field='ID', outputs=outputs, raw_response=response ) def qradar_ips_local_destination_get_command(client: Client, args: Dict[str, Any]) -> CommandResults: """ Get local destination IPS from QRadar service. Args: client (Client): Client to perform API calls to QRadar service. args (Dict[str, Any): XSOAR arguments. Returns: (CommandResults). """ response = perform_ips_command_request(client, args, is_destination_addresses=True) outputs = sanitize_outputs(response, LOCAL_DESTINATION_IPS_OLD_NEW_MAP) return CommandResults( readable_output=tableToMarkdown('Local Destination IPs', outputs), outputs_prefix='QRadar.LocalDestinationIP', outputs_key_field='ID', outputs=outputs, raw_response=response ) def qradar_reset_last_run_command() -> str: """ Puts the reset flag inside integration context. Returns: (str): 'fetch-incidents was reset successfully'. """ ctx = get_integration_context() ctx[RESET_KEY] = True set_to_integration_context_with_retries(ctx) return 'fetch-incidents was reset successfully.' def qradar_get_mapping_fields_command(client: Client) -> Dict: """ Returns Dict object containing the list of fields for an incident type. 
This command should be used for debugging purposes. Args: client (Client): Client to perform API calls. Returns: (Dict): Contains all the mapping. """ offense = { 'username_count': 'int', 'description': 'str', 'rules': { 'id': 'int', 'type': 'str', 'name': 'str' }, 'event_count': 'int', 'flow_count': 'int', 'assigned_to': 'NoneType', 'security_category_count': 'int', 'follow_up': 'bool', 'source_address_ids': 'str', 'source_count': 'int', 'inactive': 'bool', 'protected': 'bool', 'closing_user': 'str', 'destination_networks': 'str', 'source_network': 'str', 'category_count': 'int', 'close_time': 'str', 'remote_destination_count': 'int', 'start_time': 'str', 'magnitude': 'int', 'last_updated_time': 'str', 'credibility': 'int', 'id': 'int', 'categories': 'str', 'severity': 'int', 'policy_category_count': 'int', 'closing_reason_id': 'str', 'device_count': 'int', 'offense_type': 'str', 'relevance': 'int', 'domain_id': 'int', 'offense_source': 'str', 'local_destination_address_ids': 'int', 'local_destination_count': 'int', 'status': 'str', 'domain_name': 'str' } events = { 'events': { 'qidname_qid': 'str', 'logsourcename_logsourceid': 'str', 'categoryname_highlevelcategory': 'str', 'categoryname_category': 'str', 'protocolname_protocolid': 'str', 'sourceip': 'str', 'sourceport': 'int', 'destinationip': 'str', 'destinationport': 'int', 'qiddescription_qid': 'str', 'username': 'NoneType', 'rulename_creeventlist': 'str', 'sourcegeographiclocation': 'str', 'sourceMAC': 'str', 'sourcev6': 'str', 'destinationgeographiclocation': 'str', 'destinationv6': 'str', 'logsourcetypename_devicetype': 'str', 'credibility': 'int', 'severity': 'int', 'magnitude': 'int', 'eventcount': 'int', 'eventDirection': 'str', 'postNatDestinationIP': 'str', 'postNatDestinationPort': 'int', 'postNatSourceIP': 'str', 'postNatSourcePort': 'int', 'preNatDestinationPort': 'int', 'preNatSourceIP': 'str', 'preNatSourcePort': 'int', 'utf8_payload': 'str', 'starttime': 'str', 'devicetime': 'int' } } assets = { 
'assets': { 'interfaces': { 'mac_address': 'str', 'ip_addresses': { 'type': 'str', 'value': 'str' }, 'id': 'int', 'Unified Name': 'str', 'Technical User': 'str', 'Switch ID': 'str', 'Business Contact': 'str', 'CVSS Availability Requirement': 'str', 'Compliance Notes': 'str', 'Primary OS ID': 'str', 'Compliance Plan': 'str', 'Switch Port ID': 'str', 'Weight': 'str', 'Location': 'str', 'CVSS Confidentiality Requirement': 'str', 'Technical Contact': 'str', 'Technical Owner': 'str', 'CVSS Collateral Damage Potential': 'str', 'Description': 'str', 'Business Owner': 'str', 'CVSS Integrity Requirement': 'str' }, 'id': 'int', 'domain_id': 'int', 'domain_name': 'str' } } # if this call fails, raise an error and stop command execution custom_fields = { 'events': {field.get('name'): field.get('property_type') for field in client.custom_properties() if 'name' in field and 'property_type' in field} } fields = { 'Offense': offense, 'Events: Builtin Fields': events, 'Events: Custom Fields': custom_fields, 'Assets': assets, } return fields def update_events_mirror_message(mirror_options: Optional[Any], events_limit: int, failure_message: str, events_count: int, events_mirrored: int) -> str: """Return the offense's events' mirror error message. Args: mirror_options (str): The mirror options for the instance. events_limit (int): The events limit for the mirroring. failure_message (str): A failure message if there was a failure during fetching of events. events_count (int): The number of events in the offense. events_mirrored (int): The number of events mirrored in the offense Returns: (str) An updated offense events mirror message. 
""" mirroring_events_message = 'Unknown' print_debug_msg(f"mirror_options {mirror_options}\n events_limit {events_limit} \n" f"failure_message {failure_message}\n events_count {events_count}\n " f"events_mirrored {events_mirrored}") if mirror_options != MIRROR_OFFENSE_AND_EVENTS: mirroring_events_message = '' elif events_mirrored < min(events_count, events_limit) and failure_message: mirroring_events_message = failure_message elif events_mirrored == events_limit: mirroring_events_message = 'Mirroring events has reached events limit in this incident.' elif events_mirrored == events_count: mirroring_events_message = 'All available events in the offense were mirrored.' return mirroring_events_message def json_loads_inner(json_dumps_list: List[str]) -> list: """ Json load values of list. Args: json_dumps_list: A list with json dumps as nodes. Returns: json loaded list of the json dumps in the original list. """ python_object_list = [] for json_dump in json_dumps_list: try: python_object_list.append(json.loads(json_dump)) except Exception as e: demisto.error(f'Exception {e} when trying to json parse {json_dump}, as part of {json_dumps_list}') raise e return python_object_list def json_dumps_inner(listed_objects: list) -> List[str]: """ Json dump values of list. Args: listed_objects: A list with nodes to be json dumped. Returns: json dumped list of the json dumps in the original list. """ listed_json_dumps = [] for python_object in listed_objects: listed_json_dumps.append(json.dumps(python_object)) return listed_json_dumps def extract_context_data(context_data: dict, include_id: bool = False) -> dict: """Transform the context data from partially json encoded to fully decoded. Args: context_data: The context data. include_id: Whether to include id in the encoding of the data. Returns: The extracted context data. 
""" new_context_data = context_data.copy() new_context_data.pop(LAST_FETCH_KEY, None) if not new_context_data: new_context_data = {} new_context_data.update({ UPDATED_MIRRORED_OFFENSES_CTX_KEY: json_loads_inner(json.loads( context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, '[]'))), MIRRORED_OFFENSES_CTX_KEY: json_loads_inner(json.loads(context_data.get(MIRRORED_OFFENSES_CTX_KEY, '[]'))), RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY: json_loads_inner(json.loads( context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, '[]'))), 'samples': json_loads_inner(json.loads(context_data.get('samples', '[]'))), 'last_mirror_update': json.loads(context_data.get('last_mirror_update', '0')) }) if include_id and LAST_FETCH_KEY in context_data: new_context_data.update({LAST_FETCH_KEY: int(json.loads(context_data.get(LAST_FETCH_KEY, '0')))}) return new_context_data def encode_context_data(context_data: dict, include_id: bool = False) -> dict: """Transform the context data from a decoded python object form to a partially json encoded form. This is done in order to maintain compatibility with the set_to_integration_context_with_retries command. Args: context_data: The context data in its decoded python object form include_id: Whether to include id in the encoding of the data. Returns: The context data in its partially json encoded form. 
""" new_context_data = context_data.copy() new_context_data.pop('retry_compatible', None) new_context_data.pop(LAST_FETCH_KEY, None) new_context_data.pop(RESET_KEY, None) new_context_data.update({ UPDATED_MIRRORED_OFFENSES_CTX_KEY: json_dumps_inner(context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, [])), MIRRORED_OFFENSES_CTX_KEY: json_dumps_inner(context_data.get(MIRRORED_OFFENSES_CTX_KEY, [])), RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY: json_dumps_inner(context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, [])), 'samples': json_dumps_inner(context_data.get('samples', [])), 'last_mirror_update': str(context_data.get('last_mirror_update', 0)) }) if include_id and LAST_FETCH_KEY in context_data: new_context_data.update({LAST_FETCH_KEY: int(context_data.get(LAST_FETCH_KEY, 0))}) return new_context_data @safely_update_context_data def remove_offense_from_context_data(context_data: dict, version: Any, offense_id: str, offense_to_remove: str) -> Tuple[dict, Any, dict]: """Remove an offense from context data UPDATED_MIRRORED_OFFENSES_CTX_KEY and RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY. Args: context_data: The context data to update. version: The version of the context data to update. offense_id: The offense id to remove from RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY. offense_to_remove: The offense to remove from UPDATED_MIRRORED_OFFENSES_CTX_KEY. 
Returns: (The new context_data, The context_data version the change was based on, The new context_data) """ updated = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, []) resubmitted = context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, []) if offense_to_remove and offense_to_remove in updated: updated.remove(offense_to_remove) if offense_id in resubmitted: resubmitted.remove(offense_id) context_data[RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY] = resubmitted context_data[UPDATED_MIRRORED_OFFENSES_CTX_KEY] = updated return encode_context_data(context_data), version, context_data def get_remote_data_command(client: Client, params: Dict[str, Any], args: Dict) -> GetRemoteDataResponse: """ get-remote-data command: Returns an updated incident and entries If offense's events were updated in the long running container, update the demisto incident. Args: client (Client): QRadar client to perform the API calls. params (Dict): Demisto params. args (Dict): id: Offense id to retrieve. lastUpdate: When was the last time we data was retrieved in Epoch. Returns: GetRemoteDataResponse. """ print_debug_msg("Started GetRemoteData") remote_args = GetRemoteDataArgs(args) ip_enrich, asset_enrich = get_offense_enrichment(params.get('enrichment', 'IPs And Assets')) offense_id = remote_args.remote_incident_id # if this call fails, raise an error and stop command execution offense = client.offenses_list(offense_id=offense_id) offense_last_update = get_time_parameter(offense.get('last_persisted_time')) mirror_options = params.get('mirror_options') raw_context, context_version = get_integration_context_with_version() context_data = extract_context_data(raw_context.copy()) events_limit = int(params.get('events_limit') or DEFAULT_EVENTS_LIMIT) print_mirror_events_stats(context_data, f"Starting Get Remote Data For " f"Offense {str(offense.get("id"))}") demisto.debug(f'Updating offense. 
Offense last update was {offense_last_update}') entries = [] if offense.get('status') == 'CLOSED' and argToBoolean(params.get('close_incident', False)): demisto.debug(f'Offense is closed: {offense}') try: if closing_reason := offense.get('closing_reason_id', ''): closing_reason = client.closing_reasons_list(closing_reason).get('text') offense_close_time = offense.get('close_time', '') closed_offense_notes = client.offense_notes_list(offense_id, f'items={DEFAULT_RANGE_VALUE}', filter_=f'create_time >= {offense_close_time}') # In QRadar UI, when you close a reason, a note is added with the reason and more details. Try to get note # if exists, else fallback to closing reason only, as closing QRadar through an API call does not create a note. close_reason_with_note = next((note.get('note_text') for note in closed_offense_notes if note.get('note_text').startswith('This offense was closed with reason:')), closing_reason) if not close_reason_with_note: print_debug_msg(f'Could not find closing reason or closing note for offense with offense id {offense_id}') close_reason_with_note = 'Unknown closing reason from QRadar' else: close_reason_with_note = f'From QRadar: {close_reason_with_note}' except Exception as e: demisto.error(f'Failed to get closing reason with error: {e}') close_reason_with_note = 'Unknown closing reason from QRadar' entries.append({ 'Type': EntryType.NOTE, 'Contents': { 'dbotIncidentClose': True, 'closeReason': close_reason_with_note }, 'ContentsFormat': EntryFormat.JSON }) failure_message = 'Failed communicating with long running container.' 
if mirror_options == MIRROR_OFFENSE_AND_EVENTS: offenses_waiting_for_update = context_data.get(MIRRORED_OFFENSES_CTX_KEY, []) max_retries = min(MAX_FETCH_EVENT_RETIRES * (len(offenses_waiting_for_update) + 3), 20) offense_to_remove = None is_waiting_to_be_updated = True evented_offense = None retries = 0 while ((not evented_offense) or is_waiting_to_be_updated) and retries < max_retries: if retries != 0: time.sleep(FAILURE_SLEEP) raw_context, context_version = get_integration_context_with_version() context_data = extract_context_data(raw_context.copy()) print_mirror_events_stats(context_data, f"Get Remote Data Loop for id {offense.get("id")}, retry {retries}") retries += 1 offenses_with_updated_events = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, []) offenses_waiting_for_update = context_data.get(MIRRORED_OFFENSES_CTX_KEY, []) evented_offense = [evented_offense for evented_offense in offenses_with_updated_events if str(evented_offense.get('id')) == str(offense.get("id"))] is_waiting_to_be_updated = any([True for waiting_offense in offenses_waiting_for_update if str(waiting_offense.get('id')) == str(offense.get("id"))]) if evented_offense: demisto.debug(f"Mirror Events: Offense {offense.get("id")} events were updated, updating incident.") if evented_offense[0].get('events'): offense['events'] = evented_offense[0].get('events') failure_message = evented_offense[0].get('mirroring_events_message', '') demisto.debug(f"Mirror Events: Offense {offense.get("id")} now has {len(offense.get("events"))} " f"fetched events. Mirror message: {failure_message}") offense_to_remove = evented_offense[0] elif is_waiting_to_be_updated: failure_message = 'In queue.' 
new_context_data = remove_offense_from_context_data(offense_id=offense_id, offense_to_remove=offense_to_remove, version=context_version, context_data=context_data) print_mirror_events_stats(new_context_data, f"Get Remote Data End for id {offense.get("id")}") enriched_offense = enrich_offenses_result(client, offense, ip_enrich, asset_enrich) final_offense_data = sanitize_outputs(enriched_offense)[0] events_message = update_events_mirror_message( mirror_options=mirror_options, events_limit=events_limit, failure_message=failure_message, events_count=int(final_offense_data.get('event_count', 0)), events_mirrored=len(final_offense_data.get('events', []))) final_offense_data['last_mirror_in_time'] = datetime.now().isoformat() final_offense_data['mirroring_events_message'] = events_message return GetRemoteDataResponse(final_offense_data, entries) @safely_update_context_data def add_modified_remote_offenses(context_data: dict, version: str, mirror_options: str, new_modified_records_ids: list, current_last_update: str, offenses: list) -> Tuple[dict, str, list]: """Add modified remote offenses to context_data and handle exhausted offenses. Args: context_data: The context data to update. version: The version of the context data to update. mirror_options: The mirror options for the integration. new_modified_records_ids: The new modified offenses ids. current_last_update: The current last mirror update. offenses: The offenses to update. 
Returns: (The new context data, The context_data version the changes were based on, The new modified records ids) """ new_context_data = context_data.copy() print_debug_msg(f'Saving New Highest ID: {context_data.get(LAST_FETCH_KEY, 0)}') new_context_data.update({'last_mirror_update': current_last_update}) if mirror_options == MIRROR_OFFENSE_AND_EVENTS: print_mirror_events_stats(new_context_data, "Get Modified Remote Data - Before update") mirrored_offenses = merge_lists(original_list=context_data.get(MIRRORED_OFFENSES_CTX_KEY, []), updated_list=offenses, key='id') new_context_data.update({MIRRORED_OFFENSES_CTX_KEY: mirrored_offenses}) remaining_resubmitted_offenses = context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, []).copy() updated_mirrored_offenses = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, []) clean_updates_mirrored_offenses = updated_mirrored_offenses.copy() if remaining_resubmitted_offenses: for offense in updated_mirrored_offenses: if str(offense.get("id")) in remaining_resubmitted_offenses: print_debug_msg(f"Removing Offense id {offense.get("id")} from processing Mirrored Events " f"since its incident is not responding. 
(It is probably closed)") clean_updates_mirrored_offenses.remove(offense) new_context_data.update({UPDATED_MIRRORED_OFFENSES_CTX_KEY: clean_updates_mirrored_offenses}) new_context_data.update({RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY: []}) clean_updates_mirrored_offenses_ids = [str(offense.get('id')) for offense in clean_updates_mirrored_offenses] if clean_updates_mirrored_offenses_ids: new_modified_records_ids = list(set(new_modified_records_ids + clean_updates_mirrored_offenses_ids)) new_context_data.update({RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY: clean_updates_mirrored_offenses_ids}) print_mirror_events_stats(new_context_data, "Get Modified Remote Data - After update") return encode_context_data(new_context_data, include_id=False), version, new_modified_records_ids def get_modified_remote_data_command(client: Client, params: Dict[str, str], args: Dict[str, str]) -> GetModifiedRemoteDataResponse: """ Performs API calls to QRadar service, querying for offenses that were updated in QRadar later than the last update time given in the argument 'lastUpdate'. Args: client (Client): QRadar client to perform the API calls. params (Dict): Demisto params. args (Dict): Demisto arguments. Returns: (GetModifiedRemoteDataResponse): IDs of the offenses that have been modified in QRadar. 
""" raw_ctx, ctx_version = get_integration_context_with_version() ctx = extract_context_data(raw_ctx, include_id=True) remote_args = GetModifiedRemoteDataArgs(args) highest_fetched_id = ctx.get(LAST_FETCH_KEY, 0) limit: int = int(params.get('mirror_limit', MAXIMUM_MIRROR_LIMIT)) range_ = f'items=0-{limit - 1}' last_update_time = ctx.get('last_mirror_update', 0) if not last_update_time: last_update_time = remote_args.last_update last_update = get_time_parameter(last_update_time, epoch_format=True) # if this call fails, raise an error and stop command execution offenses = client.offenses_list(range_=range_, filter_=f'id <= {highest_fetched_id} AND last_persisted_time > {last_update}', sort='+last_persisted_time', fields='id,start_time,event_count,last_persisted_time') new_modified_records_ids = [str(offense.get('id')) for offense in offenses if 'id' in offense] current_last_update = last_update if not offenses else offenses[-1].get('last_persisted_time') new_modified_records_ids = add_modified_remote_offenses(context_data=ctx, version=ctx_version, mirror_options=params.get('mirror_options'), new_modified_records_ids=new_modified_records_ids, current_last_update=current_last_update, offenses=offenses) return GetModifiedRemoteDataResponse(new_modified_records_ids) def clear_integration_ctx(ctx: dict) -> dict: """Return a cleared context_data dict so set_integration_context could be called on it. Calling set_integration_context with the output of this function ensures the next call to set_to_integration_context_with_retries will not fail. 
Args: ctx: The context_data to simplify Returns: The cleared context_data """ fetch_id_ctx: str = ctx.get(LAST_FETCH_KEY) or '0' try: fetch_id = int(fetch_id_ctx) except ValueError: try: fetch_id = int(json.loads(fetch_id_ctx)) except ValueError: print_debug_msg(f"Could not retrive LAST_FETCH_KEY from {fetch_id_ctx} Setting to 0") fetch_id = 0 last_update_ctx: str = ctx.get('last_mirror_update') or '0' try: last_update = str(int(last_update_ctx)) except ValueError: try: last_update = str(int(json.loads(last_update_ctx))) except ValueError: print_debug_msg(f"Could not retrive last_mirror_update from {last_update_ctx} Setting to '0'") last_update = '0' return {LAST_FETCH_KEY: json.dumps(fetch_id), 'last_mirror_update': json.dumps(last_update), UPDATED_MIRRORED_OFFENSES_CTX_KEY: '[]', MIRRORED_OFFENSES_CTX_KEY: '[]', RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY: '[]', 'samples': '[]'} def change_ctx_to_be_compatible_with_retry() -> None: """ In order to move QRadar from using set_integration_context to set_to_integration_context_with_retries, the fields need to change to JSON strings. Change is required due to race condition occurring between get-modified-remote-data to long-running-execution. Because some customers already have instances running where fields are not JSON fields, this function is needed to make them be compatible with new changes. Returns: (None): Modifies context to be compatible. """ ctx = get_integration_context() new_ctx = ctx.copy() try: extracted_ctx = extract_context_data(ctx) print_mirror_events_stats(extracted_ctx, "Checking ctx") print_debug_msg("ctx was found to be compatible with retries") extract_works = True except Exception as e: print_debug_msg(f"extracting ctx {ctx} failed, trying to make it retry compatible. 
Error was: {str(e)}") extract_works = False if not extract_works: cleared_ctx = clear_integration_ctx(new_ctx) print_debug_msg(f"Change ctx context data was cleared and changing to {cleared_ctx}") set_integration_context(cleared_ctx) print_debug_msg(f"Change ctx context data was cleared and changed to {cleared_ctx}") ''' MAIN FUNCTION ''' def main() -> None: params = demisto.params() command = demisto.command() args = demisto.args() # handle allowed advanced parameters adv_params = params.get('adv_params') if adv_params: try: globals_ = globals() for adv_p in adv_params.split(','): adv_p_kv = [item.strip() for item in adv_p.split('=')] if len(adv_p_kv) != 2: raise DemistoException( f'Failed to parse advanced parameter: {adv_p} - please make sure you entered it correctly.') adv_param_name = adv_p_kv[0] if adv_param_name in ADVANCED_PARAMETERS_STRING_NAMES: globals_[adv_p_kv[0]] = adv_p_kv[1] elif adv_param_name in ADVANCED_PARAMETER_INT_NAMES: globals_[adv_p_kv[0]] = int(adv_p_kv[1]) else: raise DemistoException( f'The parameter: {adv_p_kv[0]} is not a valid advanced parameter. Please remove it') except DemistoException as e: raise DemistoException(f'Failed to parse advanced params. Error: {e.message}') except Exception as e: raise DemistoException(f'Failed to parse advanced params. 
Error: {e}') server = params.get('server') verify_certificate = not params.get('insecure', False) proxy = params.get('proxy', False) api_version = params.get('api_version') if float(api_version) < MINIMUM_API_VERSION: raise DemistoException(f'API version cannot be lower than {MINIMUM_API_VERSION}') credentials = params.get('credentials') try: client = Client( server=server, verify=verify_certificate, proxy=proxy, api_version=api_version, credentials=credentials) # All command names with or are for supporting QRadar v2 command names for backward compatibility if command == 'test-module': return_results(test_module_command(client, params)) elif command == 'fetch-incidents': demisto.incidents(fetch_incidents_command()) elif command == 'long-running-execution': change_ctx_to_be_compatible_with_retry() support_multithreading() long_running_execution_command(client, params) elif command == 'qradar-offenses-list' or command == 'qradar-offenses' or command == 'qradar-offense-by-id': return_results(qradar_offenses_list_command(client, args)) elif command == 'qradar-offense-update' or command == 'qradar-update-offense': return_results(qradar_offense_update_command(client, args)) elif command == 'qradar-closing-reasons' or command == 'qradar-get-closing-reasons': return_results(qradar_closing_reasons_list_command(client, args)) elif command == 'qradar-offense-notes-list' or command == 'qradar-get-note': return_results(qradar_offense_notes_list_command(client, args)) elif command == 'qradar-offense-note-create' or command == 'qradar-create-note': return_results(qradar_offense_notes_create_command(client, args)) elif command == 'qradar-rules-list': return_results(qradar_rules_list_command(client, args)) elif command == 'qradar-rule-groups-list': return_results(qradar_rule_groups_list_command(client, args)) elif command == 'qradar-assets-list' or command == 'qradar-get-assets' or command == 'qradar-get-asset-by-id': return_results(qradar_assets_list_command(client, args)) elif 
command == 'qradar-saved-searches-list': return_results(qradar_saved_searches_list_command(client, args)) elif command == 'qradar-searches-list': return_results(qradar_searches_list_command(client, args)) elif command == 'qradar-search-create' or command == 'qradar-searches': return_results(qradar_search_create_command(client, args)) elif command == 'qradar-search-status-get' or command == 'qradar-get-search': return_results(qradar_search_status_get_command(client, args)) elif command == 'qradar-search-results-get' or command == 'qradar-get-search-results': return_results(qradar_search_results_get_command(client, args)) elif command == 'qradar-reference-sets-list' or command == 'qradar-get-reference-by-name': return_results(qradar_reference_sets_list_command(client, args)) elif command == 'qradar-reference-set-create' or command == 'qradar-create-reference-set': return_results(qradar_reference_set_create_command(client, args)) elif command == 'qradar-reference-set-delete' or command == 'qradar-delete-reference-set': return_results(qradar_reference_set_delete_command(client, args)) elif command == 'qradar-reference-set-value-upsert' or command == 'qradar-create-reference-set-value' or \ command == 'qradar-update-reference-set-value': return_results(qradar_reference_set_value_upsert_command(client, args)) elif command == 'qradar-reference-set-value-delete' or command == 'qradar-delete-reference-set-value': return_results(qradar_reference_set_value_delete_command(client, args)) elif command == 'qradar-domains-list' or command == 'qradar-get-domains' or \ command == 'qradar-get-domain-by-id': return_results(qradar_domains_list_command(client, args)) elif command == 'qradar-indicators-upload' or command == 'qradar-upload-indicators': return_results(qradar_indicators_upload_command(client, args)) elif command == 'qradar-geolocations-for-ip': return_results(qradar_geolocations_for_ip_command(client, args)) elif command == 'qradar-log-sources-list': 
return_results(qradar_log_sources_list_command(client, args)) elif command == 'qradar-get-custom-properties': return_results(qradar_get_custom_properties_command(client, args)) elif command == 'qradar-ips-source-get': return_results(qradar_ips_source_get_command(client, args)) elif command == 'qradar-ips-local-destination-get': return_results(qradar_ips_local_destination_get_command(client, args)) elif command == 'qradar-reset-last-run': return_results(qradar_reset_last_run_command()) elif command == 'get-mapping-fields': return_results(qradar_get_mapping_fields_command(client)) elif command == 'get-remote-data': change_ctx_to_be_compatible_with_retry() return_results(get_remote_data_command(client, params, args)) elif command == 'get-modified-remote-data': change_ctx_to_be_compatible_with_retry() return_results(get_modified_remote_data_command(client, params, args)) else: raise NotImplementedError(f'''Command '{command}' is not implemented.''') # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback print_debug_msg(f"The integration context_data is {get_integration_context()}") return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') ''' ENTRY POINT ''' if __name__ in ('__main__', '__builtin__', 'builtins'): register_signal_handler_profiling_dump(profiling_dump_rows_limit=PROFILING_DUMP_ROWS_LIMIT) main()
import concurrent.futures import secrets from enum import Enum from ipaddress import ip_address from typing import Tuple, Set, Dict, Callable from urllib import parse import pytz import urllib3 from CommonServerUserPython import * # noqa from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import # Disable insecure warnings urllib3.disable_warnings() # pylint: disable=no-member ''' ADVANCED GLOBAL PARAMETERS ''' SAMPLE_SIZE = 2 # number of samples to store in integration context EVENTS_INTERVAL_SECS = 15 # interval between events polling EVENTS_FAILURE_LIMIT = 3 # amount of consecutive failures events fetch will tolerate FAILURE_SLEEP = 15 # sleep between consecutive failures events fetch FETCH_SLEEP = 60 # sleep between fetches BATCH_SIZE = 100 # batch size used for offense ip enrichment OFF_ENRCH_LIMIT = BATCH_SIZE * 10 # max amount of IPs to enrich per offense MAX_WORKERS = 8 # max concurrent workers used for events enriching DOMAIN_ENRCH_FLG = 'true' # when set to true, will try to enrich offense and assets with domain names RULES_ENRCH_FLG = 'true' # when set to true, will try to enrich offense with rule names MAX_FETCH_EVENT_RETIRES = 3 # max iteration to try search the events of an offense SLEEP_FETCH_EVENT_RETIRES = 10 # sleep between iteration to try search the events of an offense MAX_NUMBER_OF_OFFENSES_TO_CHECK_SEARCH = 5 # Number of offenses to check during mirroring if search was completed. 
DEFAULT_EVENTS_TIMEOUT = 30 # default timeout for the events enrichment in minutes PROFILING_DUMP_ROWS_LIMIT = 20 ADVANCED_PARAMETERS_STRING_NAMES = [ 'DOMAIN_ENRCH_FLG', 'RULES_ENRCH_FLG', ] ADVANCED_PARAMETER_INT_NAMES = [ 'EVENTS_INTERVAL_SECS', 'EVENTS_FAILURE_LIMIT', 'FAILURE_SLEEP', 'FETCH_SLEEP', 'BATCH_SIZE', 'OFF_ENRCH_LIMIT', 'MAX_WORKERS', 'MAX_FETCH_EVENT_RETIRES', 'SLEEP_FETCH_EVENT_RETIRES', 'DEFAULT_EVENTS_TIMEOUT', 'PROFILING_DUMP_ROWS_LIMIT', ] ''' CONSTANTS ''' API_USERNAME = '_api_token_key' RESET_KEY = 'reset' LAST_FETCH_KEY = 'id' MINIMUM_API_VERSION = 10.1 DEFAULT_RANGE_VALUE = '0-49' DEFAULT_TIMEOUT_VALUE = '35' DEFAULT_LIMIT_VALUE = 50 MAXIMUM_MIRROR_LIMIT = 100 DEFAULT_EVENTS_LIMIT = 20 MAXIMUM_OFFENSES_PER_FETCH = 50 DEFAULT_OFFENSES_PER_FETCH = 20 DEFAULT_MIRRORING_DIRECTION = 'No Mirroring' MIRROR_OFFENSE_AND_EVENTS = 'Mirror Offense and Events' MIRROR_DIRECTION: Dict[str, Optional[str]] = { 'No Mirroring': None, 'Mirror Offense': 'In', MIRROR_OFFENSE_AND_EVENTS: 'In' } MIRRORED_OFFENSES_CTX_KEY = 'mirrored_offenses' UPDATED_MIRRORED_OFFENSES_CTX_KEY = 'updated_mirrored_offenses' RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY = 'resubmitted_mirrored_offenses' UTC_TIMEZONE = pytz.timezone('utc') ID_QUERY_REGEX = re.compile(r'(?:\s+|^)id((\s)*)>(=?)((\s)*)((\d)+)(?:\s+|$)') ASCENDING_ID_ORDER = '+id' EXECUTOR = concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) ''' OUTPUT FIELDS REPLACEMENT MAPS ''' OFFENSE_OLD_NEW_NAMES_MAP = { 'credibility': 'Credibility', 'relevance': 'Relevance', 'severity': 'Severity', 'assigned_to': 'AssignedTo', 'destination_networks': 'DestinationHostname', 'status': 'Status', 'closing_user': 'ClosingUser', 'closing_reason_id': 'ClosingReason', 'close_time': 'CloseTime', 'categories': 'Categories', 'follow_up': 'Followup', 'id': 'ID', 'description': 'Description', 'source_address_ids': 'SourceAddress', 'local_destination_address_ids': 'DestinationAddress', 'remote_destination_count': 'RemoteDestinationCount', 
'start_time': 'StartTime', 'event_count': 'EventCount', 'flow_count': 'FlowCount', 'offense_source': 'OffenseSource', 'magnitude': 'Magnitude', 'last_updated_time': 'LastUpdatedTime', 'offense_type': 'OffenseType', 'protected': 'Protected', 'LinkToOffense': 'LinkToOffense', 'rules': 'Rules', 'domain_name': 'DomainName', 'assets': 'Assets' } CLOSING_REASONS_OLD_NEW_MAP = { 'id': 'ID', 'text': 'Name', 'is_reserved': 'IsReserved', 'is_deleted': 'IsDeleted' } NOTES_OLD_NEW_MAP = { 'id': 'ID', 'note_text': 'Text', 'create_time': 'CreateTime', 'username': 'CreatedBy' } RULES_OLD_NEW_MAP = { 'owner': 'Owner', 'base_host_id': 'BaseHostID', 'capacity_timestamp': 'CapacityTimestamp', 'origin': 'Origin', 'creation_date': 'CreationDate', 'type': 'Type', 'enabled': 'Enabled', 'modification_date': 'ModificationDate', 'name': 'Name', 'average_capacity': 'AverageCapacity', 'id': 'ID', 'base_capacity': 'BaseCapacity' } RULES_GROUP_OLD_NEW_MAP = { 'owner': 'Owner', 'modified_time': 'ModifiedTime', 'level': 'Level', 'name': 'Name', 'description': 'Description', 'id': 'ID', 'child_groups': 'ChildGroups', 'child_items': 'ChildItems', 'type': 'Type', 'parent_id': 'ParentID' } ASSET_OLD_NEW_MAP = { 'vulnerability_count': 'VulnerabilityCount', 'interfaces': 'Interfaces', 'risk_score_sum': 'RiskScoreSum', 'hostnames': 'Hostnames', 'id': 'ID', 'users': 'Users', 'domain_id': 'DomainID', 'properties': 'Properties', 'products': 'Products' } SEARCH_OLD_NEW_MAP = {'search_id': 'ID', 'status': 'Status'} REFERENCE_SETS_OLD_NEW_MAP = { 'number_of_elements': 'NumberOfElements', 'name': 'Name', 'creation_time': 'CreationTime', 'element_type': 'ElementType', 'time_to_live': 'TimeToLive', 'timeout_type': 'TimeoutType', 'data': 'Data', } REFERENCE_SET_DATA_OLD_NEW_MAP = { 'last_seen': 'LastSeen', 'source': 'Source', 'value': 'Value', 'first_seen': 'FirstSeen' } DOMAIN_OLD_NEW_MAP = { 'asset_scanner_ids': 'AssetScannerIDs', 'custom_properties': 'CustomProperties', 'deleted': 'Deleted', 'description': 
'Description', 'event_collector_ids': 'EventCollectorIDs', 'flow_collector_ids': 'FlowCollectorIDs', 'flow_source_ids': 'FlowSourceIDs', 'id': 'ID', 'log_source_ids': 'LogSourceIDs', 'log_source_group_ids': 'LogSourceGroupIDs', 'name': 'Name', 'qvm_scanner_ids': 'QVMScannerIDs', 'tenant_id': 'TenantID' } SAVED_SEARCH_OLD_NEW_MAP = { 'owner': 'Owner', 'description': 'Description', 'creation_date': 'CreationDate', 'uid': 'UID', 'database': 'Database', 'is_quick_search': 'QuickSearch', 'name': 'Name', 'modified_date': 'ModifiedDate', 'id': 'ID', 'aql': 'AQL', 'is_shared': 'IsShared' } IP_GEOLOCATION_OLD_NEW_MAP = { 'continent': 'Continent', 'traits': 'Traits', 'geo_json': 'Geolocation', 'city': 'City', 'ip_address': 'IPAddress', 'represented_country': 'RepresentedCountry', 'registered_country': 'RegisteredCountry', 'is_local': 'IsLocalCountry', 'location': 'Location', 'postal': 'Postal', 'physical_country': 'PhysicalCountry', 'subdivisions': 'SubDivisions' } LOG_SOURCES_OLD_NEW_MAP = { 'sending_ip': 'SendingIP', 'internal': 'Internal', 'protocol_parameters': 'ProtocolParameters', 'description': 'Description', 'enabled': 'Enabled', 'group_ids': 'GroupIDs', 'credibility': 'Credibility', 'id': 'ID', 'protocol_type_id': 'ProtocolTypeID', 'creation_date': 'CreationDate', 'name': 'Name', 'modified_date': 'ModifiedDate', 'auto_discovered': 'AutoDiscovered', 'type_id': 'TypeID', 'last_event_time': 'LastEventTime', 'gateway': 'Gateway', 'status': 'Status' } USECS_ENTRIES = {'last_persisted_time', 'start_time', 'close_time', 'create_time', 'creation_time', 'creation_date', 'last_updated_time', 'first_persisted_time', 'modification_date', 'last_seen', 'first_seen', 'starttime', 'devicetime', 'last_reported', 'created', 'last_seen_profiler', 'last_seen_scanner', 'first_seen_scanner', 'first_seen_profiler', 'modified_time', 'last_event_time', 'modified_date', 'first_event_flow_seen', 'last_event_flow_seen'} LOCAL_DESTINATION_IPS_OLD_NEW_MAP = { 'domain_id': 'DomainID', 
'event_flow_count': 'EventFlowCount', 'first_event_flow_seen': 'FirstEventFlowSeen', 'id': 'ID',
'last_event_flow_seen': 'LastEventFlowSeen', 'local_destination_ip': 'LocalDestinationIP', 'magnitude': 'Magnitude',
'network': 'Network', 'offense_ids': 'OffenseIDs', 'source_address_ids': 'SourceAddressIDs'
}

# Maps raw QRadar source-IP API field names to the capitalized context output keys.
SOURCE_IPS_OLD_NEW_MAP = {
    'domain_id': 'DomainID',
    'event_flow_count': 'EventFlowCount',
    'first_event_flow_seen': 'FirstEventFlowSeen',
    'id': 'ID',
    'last_event_flow_seen': 'LastEventFlowSeen',
    'local_destination_address_ids': 'LocalDestinationAddressIDs',
    'magnitude': 'Magnitude',
    'network': 'Network',
    'offense_ids': 'OffenseIDs',
    'source_ip': 'SourceIP'
}

''' ENRICHMENT MAPS '''

# Asset property display names -> output keys used for the basic asset enrichment.
ASSET_PROPERTIES_NAME_MAP = {
    'Unified Name': 'Name',
    'CVSS Collateral Damage Potential': 'AggregatedCVSSScore',
    'Weight': 'Weight'
}

# Additional asset property display names, applied only when full enrichment is requested.
FULL_ASSET_PROPERTIES_NAMES_MAP = {
    'Compliance Notes': 'ComplianceNotes',
    'Compliance Plan': 'CompliancePlan',
    'Location': 'Location',
    'Switch ID': 'SwitchID',
    'Switch Port ID': 'SwitchPort',
    'Group Name': 'GroupName',
    'Vulnerabilities': 'Vulnerabilities',
}

# Integration params that must be configured when long-running execution is enabled
# (param field name -> display name used in the validation error message).
LONG_RUNNING_REQUIRED_PARAMS = {'fetch_mode': 'Fetch mode',
                                'offenses_per_fetch': 'Number of offenses to pull per API call (max 50)',
                                'events_limit': 'Maximum number of events per incident.'}

''' ENUMS '''


class FetchMode(Enum):
    """
    Enums for the options of fetching the incidents.
    """
    no_events = 'Fetch Without Events'
    all_events = 'Fetch With All Events'
    correlations_events_only = 'Fetch Correlation Events Only'


''' CLIENT CLASS '''


class Client(BaseClient):
    """Thin wrapper around BaseClient for the QRadar REST API (base path '/api')."""

    def __init__(self, server: str, verify: bool, proxy: bool, api_version: str, credentials: Dict):
        username = credentials.get('identifier')
        password = credentials.get('password')
        # When the reserved API-token username is configured, authenticate with the
        # QRadar 'SEC' header instead of HTTP basic auth.
        if username == API_USERNAME:
            self.base_headers = {'Version': api_version, 'SEC': password}
            auth = None
        else:
            auth = (username, password)
            self.base_headers = {'Version': api_version}
        base_url = urljoin(server, '/api')
        super().__init__(base_url=base_url, verify=verify, proxy=proxy, auth=auth)
        self.password = password
        self.server = server

    def http_request(self, method: str, url_suffix: str, params: Optional[Dict] = None,
                     json_data: Optional[Dict] = None, additional_headers: Optional[Dict] = None,
                     timeout: Optional[int] = None):
        """Perform an API request with the base headers merged in.

        Note: on a key collision the base headers take precedence over
        additional_headers (base headers are unpacked last).
        """
        headers = {**additional_headers, **self.base_headers} if additional_headers else self.base_headers
        return self._http_request(
            method=method,
            url_suffix=url_suffix,
            params=params,
            json_data=json_data,
            headers=headers,
            error_handler=self.qradar_error_handler,
            timeout=timeout
        )

    @staticmethod
    def qradar_error_handler(res: requests.Response):
        """
        QRadar error handler for any error occurred during the API request.
        This function job is to translate the known exceptions returned by QRadar
        to human readable exception to help the user understand why the request have failed.
        If error returned is not in the expected error format, raises the exception as is.
        Args:
            res (Any): The error response returned by QRadar.

        Returns:
            - raises DemistoException.
        """
        err_msg = f'Error in API call [{res.status_code}] - {res.reason}'
        try:
            # Try to parse json error response
            error_entry = res.json()
            message = error_entry.get('message', '')
            if 'items=x-y' in message:
                message = 'Failed to parse Range argument. The syntax of the Range argument must follow this pattern:' \
                          ' x-y'
            elif 'unauthorized to access' in err_msg or 'No SEC header present in request' in err_msg:
                message = 'Authorization Error: make sure credentials are correct.'
            elif 'The specified encryption strength is not available' in err_msg:
                err_msg = ''
                message = 'The specified encryption is not available, try using a weaker encryption (AES128).'
            elif 'User has insufficient capabilities to access this endpoint resource' in message:
                message = 'The given credentials do not have the needed permissions to perform the call the endpoint' \
                          f'\n{res.request.path_url}.\n' \
                          'Please supply credentials with the needed permissions as can be seen in the integration ' \
                          'description, or do not call or enrich offenses with the mentioned endpoint.'
            err_msg += f'\n{message}'
            raise DemistoException(err_msg, res=res)
        except ValueError:
            # Response body was not JSON - append the raw text instead.
            err_msg += '\n{}'.format(res.text)
            raise DemistoException(err_msg, res=res)

    def offenses_list(self, range_: Optional[str] = None, offense_id: Optional[int] = None,
                      filter_: Optional[str] = None, fields: Optional[str] = None, sort: Optional[str] = None):
        """List offenses, or get a single offense when offense_id is given (GET /siem/offenses)."""
        id_suffix = f'/{offense_id}' if offense_id else ''
        params = assign_params(fields=fields) if offense_id else assign_params(filter=filter_, fields=fields, sort=sort)
        # The 'Range' pagination header is only relevant when listing (no specific ID).
        additional_headers = {'Range': range_} if not offense_id else None
        return self.http_request(
            method='GET',
            url_suffix=f'/siem/offenses{id_suffix}',
            params=params,
            additional_headers=additional_headers
        )

    def offense_update(self, offense_id: int, protected: Optional[str] = None, follow_up: Optional[str] = None,
                       status: Optional[str] = None, closing_reason_id: Optional[int] = None,
                       assigned_to: Optional[str] = None, fields: Optional[str] = None):
        """Update an offense (POST /siem/offenses/{id})."""
        return self.http_request(
            method='POST',
            url_suffix=f'/siem/offenses/{offense_id}',
            params=assign_params(
                protected=protected,
                follow_up=follow_up,
                status=status,
                closing_reason_id=closing_reason_id,
                assigned_to=assigned_to,
                fields=fields
            )
        )

    def closing_reasons_list(self, closing_reason_id: Optional[int] = None, include_reserved: Optional[bool] = None,
                             include_deleted: Optional[bool] = None, range_: Optional[str] = None,
                             filter_: Optional[str] = None, fields: Optional[str] = None):
        """List offense closing reasons, or get one by ID (GET /siem/offense_closing_reasons)."""
        id_suffix = f'/{closing_reason_id}' if closing_reason_id else ''
        params = assign_params(fields=fields) if closing_reason_id else assign_params(include_reserved=include_reserved,
                                                                                     include_deleted=include_deleted,
                                                                                     filter=filter_, fields=fields)
        additional_headers = {'Range': range_} if not closing_reason_id and range_ else None
        return self.http_request(
            method='GET',
            url_suffix=f'/siem/offense_closing_reasons{id_suffix}',
            additional_headers=additional_headers,
            params=params
        )

    def offense_notes_list(self, offense_id: int, range_: str, note_id: Optional[int] = None,
                           filter_: Optional[str] = None, fields: Optional[str] = None):
        """List notes of an offense, or get a single note (GET /siem/offenses/{id}/notes)."""
        note_id_suffix = f'/{note_id}' if note_id else ''
        params = assign_params(fields=fields) if note_id else assign_params(filter=filter_, fields=fields)
        additional_headers = {'Range': range_} if not note_id else None
        return self.http_request(
            method='GET',
            url_suffix=f'/siem/offenses/{offense_id}/notes{note_id_suffix}',
            additional_headers=additional_headers,
            params=params
        )

    def offense_notes_create(self, offense_id: int, note_text: str, fields: Optional[str] = None):
        """Create a note on an offense (POST /siem/offenses/{id}/notes)."""
        return self.http_request(
            method='POST',
            url_suffix=f'/siem/offenses/{offense_id}/notes',
            params=assign_params(note_text=note_text, fields=fields)
        )

    def rules_list(self, rule_id: Optional[str] = None, range_: Optional[str] = None, filter_: Optional[str] = None,
                   fields: Optional[str] = None):
        """List analytics rules, or get a single rule (GET /analytics/rules)."""
        id_suffix = f'/{rule_id}' if rule_id else ''
        params = assign_params(fields=fields) if rule_id else assign_params(filter=filter_, fields=fields)
        additional_headers = {'Range': range_} if range_ and not rule_id else None
        return self.http_request(
            method='GET',
            url_suffix=f'/analytics/rules{id_suffix}',
            params=params,
            additional_headers=additional_headers
        )

    def rule_groups_list(self, range_: str, rule_group_id: Optional[int] = None, filter_: Optional[str] = None,
                         fields: Optional[str] = None):
        """List rule groups, or get a single group (GET /analytics/rule_groups)."""
        id_suffix = f'/{rule_group_id}' if rule_group_id else ''
        additional_headers = {'Range': range_} if not rule_group_id else None
        params = assign_params(fields=fields) if rule_group_id else assign_params(filter=filter_, fields=fields)
        return self.http_request(
            method='GET',
            url_suffix=f'/analytics/rule_groups{id_suffix}',
            additional_headers=additional_headers,
            params=params
        )

    def assets_list(self, range_: Optional[str] = None, filter_: Optional[str] = None, fields: Optional[str] = None):
        """List assets (GET /asset_model/assets)."""
        return self.http_request(
            method='GET',
            url_suffix='/asset_model/assets',
            additional_headers={'Range': range_},
            params=assign_params(filter=filter_, fields=fields)
        )

    def saved_searches_list(self, range_: str, timeout: Optional[int], saved_search_id: Optional[str] = None,
                            filter_: Optional[str] = None, fields: Optional[str] = None):
        """List Ariel saved searches, or get one by ID (GET /ariel/saved_searches)."""
        id_suffix = f'/{saved_search_id}' if saved_search_id else ''
        params = assign_params(fields=fields) if saved_search_id else assign_params(filter=filter_, fields=fields)
        additional_headers = {'Range': range_} if not saved_search_id else None
        return self.http_request(
            method='GET',
            url_suffix=f'/ariel/saved_searches{id_suffix}',
            additional_headers=additional_headers,
            params=params,
            timeout=timeout
        )

    def searches_list(self, range_: str, filter_: Optional[str] = None):
        """List Ariel searches (GET /ariel/searches)."""
        return self.http_request(
            method='GET',
            url_suffix='/ariel/searches',
            additional_headers={'Range': range_},
            params=assign_params(filter=filter_)
        )

    def search_create(self, query_expression: Optional[str] = None, saved_search_id: Optional[str] = None):
        """Create an Ariel search from an AQL expression or a saved search (POST /ariel/searches)."""
        return self.http_request(
            method='POST',
            url_suffix='/ariel/searches',
            params=assign_params(
                query_expression=query_expression,
                saved_search_id=saved_search_id
            )
        )

    def search_status_get(self, search_id: str):
        """Get the status of an Ariel search (GET /ariel/searches/{id})."""
        return self.http_request(
            method='GET',
            url_suffix=f'/ariel/searches/{search_id}',
        )

    def search_results_get(self, search_id: str, range_: Optional[str] = None):
        """Get the results of a completed Ariel search (GET /ariel/searches/{id}/results)."""
        return self.http_request(
            method='GET',
            url_suffix=f'/ariel/searches/{search_id}/results',
            additional_headers={'Range': range_} if range_ else None
        )

    def reference_sets_list(self, range_: Optional[str] = None, ref_name: Optional[str] = None,
                            filter_: Optional[str] = None, fields: Optional[str] = None):
        """List reference sets, or get one by name (GET /reference_data/sets)."""
        # Quote the name so special characters survive as a single path segment.
        name_suffix = f'/{parse.quote(ref_name, safe="")}' if ref_name else ''
        params = assign_params(fields=fields) if ref_name else assign_params(filter=filter_, fields=fields)
        additional_headers = {'Range': range_} if not ref_name else None
        return self.http_request(
            method='GET',
            url_suffix=f'/reference_data/sets{name_suffix}',
            params=params,
            additional_headers=additional_headers
        )

    def reference_set_create(self, ref_name: str, element_type: str, timeout_type: Optional[str] = None,
                             time_to_live: Optional[str] = None, fields: Optional[str] = None):
        """Create a reference set (POST /reference_data/sets)."""
        return self.http_request(
            method='POST',
            url_suffix='/reference_data/sets',
            params=assign_params(
                name=ref_name,
                element_type=element_type,
                timeout_type=timeout_type,
                time_to_live=time_to_live,
                fields=fields
            )
        )

    def reference_set_delete(self, ref_name: str, purge_only: Optional[str] = None, fields: Optional[str] = None):
        """Delete (or purge) a reference set (DELETE /reference_data/sets/{name})."""
        return self.http_request(
            method='DELETE',
            url_suffix=f'/reference_data/sets/{parse.quote(ref_name, safe="")}',
            params=assign_params(purge_only=purge_only, fields=fields)
        )

    def reference_set_value_upsert(self, ref_name: str, value: str, source: Optional[str] = None,
                                   fields: Optional[str] = None):
        """Add or update a value in a reference set (POST /reference_data/sets/{name})."""
        return self.http_request(
            method='POST',
            url_suffix=f'/reference_data/sets/{parse.quote(ref_name, safe="")}',
            params=assign_params(value=value, source=source, fields=fields)
        )

    def reference_set_value_delete(self, ref_name: str, value: str):
        """Delete a value from a reference set (DELETE /reference_data/sets/{name}/{value})."""
        return self.http_request(
            method='DELETE',
            url_suffix=f'/reference_data/sets/{parse.quote(ref_name, safe="")}/{value}'
        )

    def domains_list(self, domain_id: Optional[int] = None, range_: Optional[str] = None,
                     filter_: Optional[str] = None, fields: Optional[str] = None):
        """List domains, or get one by ID (GET /config/domain_management/domains)."""
        id_suffix = f'/{domain_id}' if domain_id else ''
        params = assign_params(fields=fields) if domain_id else assign_params(filter=filter_, fields=fields)
        additional_headers = {'Range': range_} if not domain_id and range_ else None
        return self.http_request(
            method='GET',
            url_suffix=f'/config/domain_management/domains{id_suffix}',
            additional_headers=additional_headers,
            params=params
        )

    def indicators_upload(self, ref_name: str, indicators: Any, fields: Optional[str] = None):
        """Bulk-load indicator values into a reference set (POST /reference_data/sets/bulk_load/{name})."""
        headers = {
            'Content-Type': 'application/json'
        }
        if fields:
            headers['fields'] = fields
        return self.http_request(
            method='POST',
            url_suffix=f'/reference_data/sets/bulk_load/{parse.quote(ref_name, safe="")}',
            json_data=indicators,
            additional_headers=headers
        )

    def geolocations_for_ip(self, filter_: Optional[str] = None, fields: Optional[str] = None):
        """Get geolocation data for IPs (GET /services/geolocations)."""
        return self.http_request(
            method='GET',
            url_suffix='/services/geolocations',
            params=assign_params(filter=filter_, fields=fields)
        )

    def log_sources_list(self, qrd_encryption_algorithm: str, qrd_encryption_password: str, range_: str,
                         filter_: Optional[str] = None, fields: Optional[str] = None):
        """List log sources; credentials in the response are encrypted with the given algorithm/password."""
        return self.http_request(
            method='GET',
            url_suffix='/config/event_sources/log_source_management/log_sources',
            params=assign_params(filter=filter_, fields=fields),
            additional_headers={
                'x-qrd-encryption-algorithm': qrd_encryption_algorithm,
                'x-qrd-encryption-password': qrd_encryption_password,
                'Range': range_
            }
        )

    def custom_properties(self, range_: Optional[str] = None, filter_: Optional[str] = None,
                          fields: Optional[str] = None):
        """List custom regex event properties (GET /config/event_sources/custom_properties/regex_properties)."""
        return self.http_request(
            method='GET',
            url_suffix='/config/event_sources/custom_properties/regex_properties',
            params=assign_params(filter=filter_, fields=fields),
            additional_headers={'Range': range_} if range_ else None
        )

    def offense_types(self, filter_: Optional[str] = None, fields: Optional[str] = None):
        """List offense types (GET /siem/offense_types)."""
        return self.http_request(
            method='GET',
            url_suffix='/siem/offense_types',
            params=assign_params(filter=filter_, fields=fields)
        )

    def get_addresses(self, address_suffix: str, filter_: Optional[str] = None, fields: Optional[str] = None,
                      range_: Optional[str] = None):
        """List source/local-destination addresses (GET /siem/{source_addresses|local_destination_addresses})."""
        return self.http_request(
            method='GET',
            url_suffix=f'/siem/{address_suffix}',
            params=assign_params(filter=filter_, fields=fields),
            additional_headers={'Range': range_} if range_ else None
        )

    def test_connection(self):
        """
        Test connection with databases (should always be up)
        """
        self.http_request(method='GET', url_suffix='/ariel/databases')
        return 'ok'


''' HELPER FUNCTIONS '''


def safely_update_context_data(func: Callable):
    """Decorator for updating context data using versions.
    In case of a race condition, preform func with the new context_data and try updating again.

    Args:
        func: The function to preform with the new context data before updating.

    raise ValueError if context_data or version are not in the kwargs for the function.
    raise DemistoException if reached maximum of retries.
    """
    def wrapper(*args, **kwargs):
        context_was_set = False
        retries = 0
        max_retries = 5
        return_value = None
        while not context_was_set and retries < max_retries:
            # func must return (context_data, version, return_value).
            context_data, version, return_value = func(*args, **kwargs)
            print_debug_msg(f'Attempting to update context data after version {version} with retry {retries}')
            new_context_data, new_version = get_integration_context_with_version()
            if new_version == version:
                # No concurrent update happened since func ran - safe to write.
                try:
                    set_to_integration_context_with_retries(context_data, max_retry_times=1)
                    context_was_set = True
                    print_debug_msg(f'Updated integration context after version {version} in retry {retries}.')
                except Exception as e:
                    if 'Max retry attempts exceeded' in str(e):
                        continue
                    else:
                        raise e
            else:
                # Race detected - rerun func with the fresh context data and version.
                if 'context_data' not in kwargs or 'version' not in kwargs:
                    raise ValueError('context_data and version must be in the func kwargs if '
                                     'safely_update_context_data decorator is used but were not found.')
                else:
                    kwargs['context_data'] = extract_context_data(new_context_data)
                    kwargs['version'] = new_version
                print_debug_msg(f'Could not update context data after version {version} due to new '
                                f'version {new_version} in retry {retries}')
            retries = retries + 1
        if retries == max_retries:
            raise DemistoException(f'Reached maximum retries, could not update context data for function {func}.')
        return return_value

    return wrapper


def add_iso_entries_to_dict(dicts: List[Dict]) -> List[Dict]:
    """
    Takes list of dicts, for each dict:
    creates a new dict, and for each field in the output that
    is contained in 'USECS_ENTRIES', maps its value to be iso format corresponding to the value of the field.
    Args:
        dicts (List[Dict]): List of the dicts to be transformed.

    Returns:
        (List[Dict]): New dicts with iso entries for the corresponding items in 'USECS_ENTRIES'
    """
    return [{k: (get_time_parameter(v, iso_format=True) if k in USECS_ENTRIES else v)
             for k, v in dict_.items()} for dict_ in dicts]


def sanitize_outputs(outputs: Any, key_replace_dict: Optional[Dict] = None) -> List[Dict]:
    """
    Gets a list of all the outputs, and sanitizes outputs.
    - Removes empty elements.
    - adds ISO entries to the outputs.
    - Outputs only keys found in 'key_replace_dict', saving their names by 'key_replace_dict values,
      if 'key_replace_dict' is not None.
    Args:
        outputs (List[Dict]): List of the outputs to be sanitized.
        key_replace_dict (Dict): Dict of the keys to transform their names.

    Returns:
        (List[Dict]): Sanitized outputs.
    """
    if not isinstance(outputs, list):
        outputs = [outputs]
    outputs = [remove_empty_elements(output) for output in outputs]
    outputs = add_iso_entries_to_dict(outputs)
    return build_final_outputs(outputs, key_replace_dict) if key_replace_dict else outputs


def get_time_parameter(arg: Union[Optional[str], Optional[int]], iso_format: bool = False,
                       epoch_format: bool = False):
    """
    parses arg into date time object with aware time zone if 'arg' exists.
    If no time zone is given, sets timezone to UTC.
    Returns the date time object created/ISO format/epoch format.
    Args:
        arg (str): The argument to turn into aware date time.
        iso_format (bool): Whether to return date or the parsed format of the date.
        epoch_format (bool): Whether to return date or the epoch format of the date.

    Returns:
        - (None) If 'arg' is None, returns None.
        - (datetime): If 'arg' is exists and 'iso_format' and 'epoch_format' are false, returns date time.
        - (str): If 'arg' is exists and parse_format is true, returns ISO format of the date time object.
        - (int): If 'arg' is exists and epoch_format is true, returns epoch format of the date time object.
    """
    maybe_unaware_date = arg_to_datetime(arg, is_utc=True)
    if not maybe_unaware_date:
        return None

    # Attach UTC when the parsed datetime carries no timezone info.
    aware_time_date = maybe_unaware_date if maybe_unaware_date.tzinfo else UTC_TIMEZONE.localize(
        maybe_unaware_date)

    if iso_format:
        return aware_time_date.isoformat()
    if epoch_format:
        # Milliseconds since epoch, as QRadar expects.
        return int(aware_time_date.timestamp() * 1000)
    return aware_time_date


def build_final_outputs(outputs: List[Dict], old_new_dict: Dict) -> List[Dict]:
    """
    Receives outputs, or a single output, and a dict containing mapping of old key names to new key names.
    Returns a list of outputs containing the new names contained in old_new_dict.
    Args:
        outputs (Dict): Outputs to replace its keys.
        old_new_dict (Dict): Old key name mapped to new key name.

    Returns:
        (Dict): The dictionary with the transformed keys and their values.
    """
    return [{old_new_dict.get(k): v for k, v in output.items() if k in old_new_dict} for output in outputs]


def build_headers(first_headers: List[str], all_headers: Set[str]) -> List[str]:
    """
    Receives headers to be shown first in entry room, and concat all the headers after first headers.
    Args:
        first_headers (Set[str]): First headers to be shown in the entry room.
        all_headers (Set[str]): List of all of the headers.

    Returns:
        (List[str]): List of all of the headers, where first_headers are first in the list.
    """
    return first_headers + list(set.difference(all_headers, first_headers))


def is_valid_ip(ip: str) -> bool:
    """Return True if 'ip' parses as a valid IPv4/IPv6 address, False otherwise."""
    try:
        ip_address(ip)
        return True
    except ValueError:
        print_debug_msg(f'IP {ip} was found invalid.')
        return False


def get_offense_types(client: Client, offenses: List[Dict]) -> Dict:
    """
    Receives list of offenses, and performs API call to QRadar service to retrieve the offense type names
    matching the offense type IDs of the offenses.
    Args:
        client (Client): Client to perform the API request to QRadar.
        offenses (List[Dict]): List of all of the offenses.

    Returns:
        (Dict): Dictionary of {offense_type_id: offense_type_name}
    """
    try:
        offense_types_ids = {offense.get('offense_type') for offense in offenses
                             if offense.get('offense_type') is not None}
        if not offense_types_ids:
            return dict()
        offense_types = client.offense_types(filter_=f'''id in ({','.join(map(str, offense_types_ids))})''',
                                             fields='id,name')
        return {offense_type.get('id'): offense_type.get('name') for offense_type in offense_types}
    except Exception as e:
        # Enrichment is best-effort: log and return an empty mapping on failure.
        demisto.error(f"Encountered an issue while getting offense type: {e}")
        return {}


def get_offense_closing_reasons(client: Client, offenses: List[Dict]) -> Dict:
    """
    Receives list of offenses, and performs API call to QRadar service to retrieve the closing reason names
    matching the closing reason IDs of the offenses.
    Args:
        client (Client): Client to perform the API request to QRadar.
        offenses (List[Dict]): List of all of the offenses.

    Returns:
        (Dict): Dictionary of {closing_reason_id: closing_reason_name}
    """
    try:
        closing_reason_ids = {offense.get('closing_reason_id') for offense in offenses
                              if offense.get('closing_reason_id') is not None}
        if not closing_reason_ids:
            return dict()
        closing_reasons = client.closing_reasons_list(filter_=f'''id in ({','.join(map(str, closing_reason_ids))})''',
                                                      fields='id,text')
        return {closing_reason.get('id'): closing_reason.get('text') for closing_reason in closing_reasons}
    except Exception as e:
        demisto.error(f"Encountered an issue while getting offense closing reasons: {e}")
        return {}


def get_domain_names(client: Client, outputs: List[Dict]) -> Dict:
    """
    Receives list of outputs, and performs API call to QRadar service to retrieve the domain names
    matching the domain IDs of the outputs.
    Args:
        client (Client): Client to perform the API request to QRadar.
        outputs (List[Dict]): List of all of the offenses.

    Returns:
        (Dict): Dictionary of {domain_id: domain_name}
    """
    try:
        domain_ids = {offense.get('domain_id') for offense in outputs if offense.get('domain_id') is not None}
        if not domain_ids:
            return dict()
        domains_info = client.domains_list(filter_=f'''id in ({','.join(map(str, domain_ids))})''', fields='id,name')
        return {domain_info.get('id'): domain_info.get('name') for domain_info in domains_info}
    except Exception as e:
        demisto.error(f"Encountered an issue while getting offense domain names: {e}")
        return {}


def get_rules_names(client: Client, offenses: List[Dict]) -> Dict:
    """
    Receives list of offenses, and performs API call to QRadar service to retrieve the rules names
    matching the rule IDs of the offenses.
    Args:
        client (Client): Client to perform the API request to QRadar.
        offenses (List[Dict]): List of all of the offenses.

    Returns:
        (Dict): Dictionary of {rule_id: rule_name}
    """
    try:
        rules_ids = {rule.get('id') for offense in offenses for rule in offense.get('rules', [])}
        if not rules_ids:
            return dict()
        rules = client.rules_list(None, None, f'''id in ({','.join(map(str, rules_ids))})''', 'id,name')
        return {rule.get('id'): rule.get('name') for rule in rules}
    except Exception as e:
        demisto.error(f"Encountered an issue while getting offenses rules: {e}")
        return {}


def get_offense_addresses(client: Client, offenses: List[Dict], is_destination_addresses: bool) -> Dict:
    """
    Receives list of offenses, and performs API call to QRadar service to retrieve the source IP values
    matching the source IPs IDs of the offenses.
    Args:
        client (Client): Client to perform the API request to QRadar.
        offenses (List[Dict]): List of all of the offenses.
        is_destination_addresses(bool): Whether addresses to enrich are destination IPs (or source).

    Returns:
        (Dict): Dictionary of {source_address_id: source_address_name}.
    """
    address_type = 'local_destination' if is_destination_addresses else 'source'
    address_field = f'{address_type}_ip'
    address_list_field = f'{address_type}_address_ids'
    url_suffix = f'{address_type}_addresses'

    def get_addresses_for_batch(b: List):
        # Best-effort per batch: a failed batch logs an error and contributes nothing.
        try:
            return client.get_addresses(url_suffix, f'''id in ({','.join(map(str, b))})''', f'id,{address_field}')
        except Exception as e:
            demisto.error(f'Failed getting address barch with error: {e}')
            return []

    addresses_ids = [address_id for offense in offenses
                     for address_id in offense.get(address_list_field, [])]

    # Submit addresses in batches to avoid overloading QRadar service
    addresses_batches = [get_addresses_for_batch(b) for b
                         in batch(addresses_ids[:OFF_ENRCH_LIMIT], batch_size=int(BATCH_SIZE))]

    return {address_data.get('id'): address_data.get(address_field)
            for addresses_batch in addresses_batches
            for address_data in addresses_batch}


def create_single_asset_for_offense_enrichment(asset: Dict) -> Dict:
    """
    Recieves one asset, and returns the expected asset values for enriching offense.
    Args:
        asset (Dict): Asset to enrich the offense with

    Returns:
        (Dict): The enriched asset.
    """
    interfaces = {'interfaces': [{
        'mac_address': interface.get('mac_address'),
        'id': interface.get('id'),
        'ip_addresses': [{
            'type': ip_add.get('type'),
            'value': ip_add.get('value')
        } for ip_add in interface.get('ip_addresses', [])]
    } for interface in asset.get('interfaces', [])]}
    # Flatten the name/value property list into top-level key: value pairs.
    properties = {prop.get('name'): prop.get('value') for prop in asset.get('properties', [])
                  if 'name' in prop and 'value' in prop}
    offense_without_properties = {k: v for k, v in asset.items() if k != 'properties'}
    return add_iso_entries_to_asset(dict(offense_without_properties, **properties, **interfaces))


def enrich_offense_with_assets(client: Client, offense_ips: List[str]) -> List[Dict]:
    """
    Receives list of offense's IPs, and performs API call to QRadar service to retrieve assets correlated to IPs given.
    Args:
        client (Client): Client to perform the API request to QRadar.
        offense_ips (List[str]): List of all of the offense's IPs.

    Returns:
        (List[Dict]): List of all the correlated assets.
    """
    def get_assets_for_ips_batch(b: List):
        filter_query = ' or '.join([f'interfaces contains ip_addresses contains value="{ip}"' for ip in b])
        try:
            return client.assets_list(filter_=filter_query)
        except Exception as e:
            demisto.error(f'Failed getting assets for filter_query: {filter_query}. {e}')
            return []

    # Drop values that are not valid IPs before querying.
    offense_ips = [offense_ip for offense_ip in offense_ips if is_valid_ip(offense_ip)]
    # Submit addresses in batches to avoid overloading QRadar service
    assets = [asset for b in batch(offense_ips[:OFF_ENRCH_LIMIT], batch_size=int(BATCH_SIZE))
              for asset in get_assets_for_ips_batch(b)]

    return [create_single_asset_for_offense_enrichment(asset) for asset in assets]


def enrich_offenses_result(client: Client, offenses: Any, enrich_ip_addresses: bool,
                           enrich_assets: bool) -> List[Dict]:
    """
    Receives list of offenses, and enriches the offenses with the following:
    - Changes offense_type value from the offense type ID to the offense type name.
    - Changes closing_reason_id value from closing reason ID to the closing reason name.
    - Adds a link to the URL of each offense.
    - Adds the domain name of the domain ID for each offense.
    - Adds to each rule of the offense its name.
    - Adds enrichment to each source/destination IP ID to its address (if enrich_ip_addresses is true).
    - Adds enrichment of assets to each offense (if enrich_assets is true).
    Args:
        client (Client): Client to perform the API calls.
        offenses (Any): List of all of the offenses to enrich.
        enrich_ip_addresses (bool): Whether to enrich the offense source/destination IP addresses.
        enrich_assets (bool): Whether to enrich the offense with assets.

    Returns:
        (List[Dict]): The enriched offenses.
""" if not isinstance(offenses, list): offenses = [offenses] print_debug_msg('Enriching offenses') offense_types_id_name_dict = get_offense_types(client, offenses) closing_reasons_id_name_dict = get_offense_closing_reasons(client, offenses) domain_id_name_dict = get_domain_names(client, offenses) if DOMAIN_ENRCH_FLG.lower() == 'true' else dict() rules_id_name_dict = get_rules_names(client, offenses) if RULES_ENRCH_FLG.lower() == 'true' else dict() source_addresses_id_ip_dict = get_offense_addresses(client, offenses, False) if enrich_ip_addresses else dict() destination_addresses_id_ip_dict = get_offense_addresses(client, offenses, True) if enrich_ip_addresses else dict() def create_enriched_offense(offense: Dict) -> Dict: link_to_offense_suffix = '/console/do/sem/offensesummary?appName=Sem&pageId=OffenseSummary&summaryId' \ f'''={offense.get('id')}''' offense_type = offense.get('offense_type') closing_reason_id = offense.get('closing_reason_id') domain_id = offense.get('domain_id') basic_enriches = { 'offense_type': offense_types_id_name_dict.get(offense_type, offense_type), 'closing_reason_id': closing_reasons_id_name_dict.get(closing_reason_id, closing_reason_id), 'LinkToOffense': urljoin(client.server, link_to_offense_suffix), } domain_enrich = { 'domain_name': domain_id_name_dict.get(domain_id, domain_id) } if DOMAIN_ENRCH_FLG.lower() == 'true' and domain_id_name_dict.get(domain_id, domain_id) else dict() rules_enrich = { 'rules': [{ 'id': rule.get('id'), 'type': rule.get('type'), 'name': rules_id_name_dict.get(rule.get('id'), rule.get('id')) } for rule in offense.get('rules', [])] if RULES_ENRCH_FLG.lower() == 'true' else dict() } source_addresses_enrich = { 'source_address_ids': [source_addresses_id_ip_dict.get(source_address_id) for source_address_id in offense.get('source_address_ids', [])] } if enrich_ip_addresses else dict() destination_addresses_enrich = { 'local_destination_address_ids': [destination_addresses_id_ip_dict.get(destination_address_id) for 
destination_address_id in offense.get('local_destination_address_ids', [])] } if enrich_ip_addresses else dict() if enrich_assets: source_ips: List = source_addresses_enrich.get('source_address_ids', []) destination_ips: List = destination_addresses_enrich.get('local_destination_address_ids', []) all_ips: List = source_ips + destination_ips asset_enrich = {'assets': enrich_offense_with_assets(client, all_ips)} else: asset_enrich = dict() return dict(offense, **basic_enriches, **domain_enrich, **rules_enrich, **source_addresses_enrich, **destination_addresses_enrich, **asset_enrich) result = [create_enriched_offense(offense) for offense in offenses] print_debug_msg('Enriched offenses successfully.') return result def enrich_asset_properties(properties: List, properties_to_enrich_dict: Dict) -> Dict: """ Receives list of properties of an asset, and properties to enrich, and returns a dict containing the enrichment Args: properties (List): List of properties of an asset. properties_to_enrich_dict (Dict): Properties to be enriched. Returns: (List[Dict]) List of new assets with enrichment. """ return { properties_to_enrich_dict.get(prop.get('name')): { 'Value': prop.get('value'), 'LastUser': prop.get('last_reported_by') } for prop in properties if prop.get('name') in properties_to_enrich_dict } def add_iso_entries_to_asset(asset: Dict) -> Dict: """ Transforms epoch entries to ISO entries in an asset. Requires a special treatment, because some of the usec entries are nested. Args: asset (Dict): Asset to transform its epoch entries to ISO. Returns: (Dict): Asset transformed. 
""" def get_asset_entry(k: str, v: Any): if k == 'interfaces': return [{ k: (get_time_parameter(v, iso_format=True) if k in USECS_ENTRIES else add_iso_entries_to_dict(v) if k == 'ip_addresses' else v) for k, v in interface.items() } for interface in v] elif k == 'properties': return add_iso_entries_to_dict(v) elif k in USECS_ENTRIES: return get_time_parameter(v, iso_format=True) else: return v return {k: get_asset_entry(k, v) for k, v in asset.items()} def enrich_assets_results(client: Client, assets: Any, full_enrichment: bool) -> List[Dict]: """ Receives list of assets, and enriches each asset with 'Endpoint' entry containing the following: - IP addresses of all interfaces. - OS name. - MAC addresses of the interfaces, if full enrichment was requested. - Domain name if full enrichment was requested. - Properties enrichment. Args: client (Client): Client to perform API call to retrieve domain names corresponding to the domain IDs. assets (List[Dict]): List of assets to be enriched. full_enrichment (bool): Whether the asset should be full enriched. Returns: (List[Dict]) List of new assets with enrichment. 
""" domain_id_name_dict = get_domain_names(client, assets) if full_enrichment else dict() def enrich_single_asset(asset: Dict) -> Dict: updated_asset = add_iso_entries_to_asset(asset) interfaces = updated_asset.get('interfaces', []) properties = updated_asset.get('properties', []) domain_id = updated_asset.get('domain_id') os_name = next((prop.get('value') for prop in properties if prop.get('name') == 'Primary OS ID'), None) ip_enrichment = { 'IPAddress': [ip_add.get('value') for interface in interfaces for ip_add in interface.get('ip_addresses', []) if ip_add.get('value')] } os_enrichment = {'OS': os_name} if os_name else dict() mac_enrichment = { 'MACAddress': [interface.get('mac_address') for interface in interfaces if interface.get('mac_address')] } if full_enrichment else dict() domains_enrichment = {'Domain': domain_id_name_dict.get(domain_id, domain_id)} \ if full_enrichment and domain_id else dict() basic_properties_enrichment = enrich_asset_properties(properties, ASSET_PROPERTIES_NAME_MAP) full_properties_enrichment = enrich_asset_properties(properties, FULL_ASSET_PROPERTIES_NAMES_MAP) \ if full_enrichment else dict() enriched_asset = dict(asset, **basic_properties_enrichment, **full_properties_enrichment) return {'Asset': add_iso_entries_to_asset(enriched_asset), 'Endpoint': dict(ip_enrichment, **os_enrichment, **mac_enrichment, **domains_enrichment)} return [enrich_single_asset(asset) for asset in assets] def get_minimum_id_to_fetch(highest_offense_id: int, user_query: Optional[str]) -> int: """ Receives the highest offense ID saved from last run, and user query. Checks if user query has a limitation for a minimum ID. If such ID exists, returns the maximum between 'highest_offense_id' and the minimum ID limitation received by the user query. Args: highest_offense_id (int): Minimum ID to fetch offenses by from last run. user_query (Optional[str]): User query for QRadar service. Returns: (int): The Minimum ID to fetch offenses by. 
""" if user_query: id_query = ID_QUERY_REGEX.search(user_query) if id_query: id_query_raw = id_query.group(0) operator = '>=' if '>=' in id_query_raw else '>' # safe to int parse without catch because regex checks for number user_offense_id = int(id_query.group(0).split(operator)[1].strip()) user_lowest_offense_id = user_offense_id if operator == '>' else user_offense_id - 1 print_debug_msg(f'Found ID in user query: {user_lowest_offense_id}, last highest ID: {highest_offense_id}') return max(highest_offense_id, user_lowest_offense_id) return highest_offense_id def get_offense_enrichment(enrichment: str) -> Tuple[bool, bool]: """ Receives enrichment asked by the user, returns true or false values indicating which enrichment should be done. Args: enrichment (Optional[str]): Enrichment argument. Returns: (bool, bool): Tuple of (ip_enrich, asset_enrich). """ if enrichment == 'IPs And Assets': return True, True if enrichment == 'IPs': return True, False return False, False def print_debug_msg(msg: str): """ Prints a message to debug with QRadarMsg prefix. Args: msg (str): Message to be logged. """ demisto.debug(f'QRadarMsg - {msg}') def reset_mirroring_events_variables(mirror_options: str): """In case of change in mirror_options initialize mirror with events context data variables. 
Args: mirror_options: The current mirror options Returns: None """ ctx = extract_context_data(get_integration_context().copy()) try: print_mirror_events_stats(ctx, f"New Long Running Container - Before Mirroring Variables Reset, " f"Mirror Option {mirror_options}") except Exception as e: print_debug_msg(f'Could not print mirror_events_stats due to error: {str(e)} \n ' f'Reseting mirroring vars') mirror_options = 'needs reset to mirroring vars' if mirror_options != MIRROR_OFFENSE_AND_EVENTS: ctx[UPDATED_MIRRORED_OFFENSES_CTX_KEY] = [] ctx[MIRRORED_OFFENSES_CTX_KEY] = [] ctx[RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY] = [] print_mirror_events_stats(ctx, "New Long Running Container - After Mirroring Variables Reset") set_to_integration_context_with_retries(encode_context_data(ctx)) def is_reset_triggered(): """ Checks if reset of integration context have been made by the user. Because fetch is long running execution, user communicates with us by calling 'qradar-reset-last-run' command which sets reset flag in context. Returns: (bool): - True if reset flag was set. If 'handle_reset' is true, also resets integration context. - False if reset flag was not found in integration context. """ ctx = get_integration_context() if ctx and RESET_KEY in ctx: print_debug_msg('Reset fetch-incidents.') set_integration_context({'samples': '[]'}) return True return False def validate_long_running_params(params: Dict) -> None: """ Receives params, checks whether the required parameters for long running execution is configured. Args: params (Dict): Cortex XSOAR params. Returns: (None): If all required params are set, raises DemistoException otherwise. """ for param_field, param_display in LONG_RUNNING_REQUIRED_PARAMS.items(): if param_field not in params: raise DemistoException(f'Parameter {param_display} is required when enabling long running execution.' 
' Please set a value for it.') ''' COMMAND FUNCTIONS ''' def test_module_command(client: Client, params: Dict) -> str: """ Tests API connectivity and authentication' Returning 'ok' indicates that the integration works like it is supposed to. Connection to the service is successful. Raises exceptions if something goes wrong. Args: client (Client): Client to perform the API calls. params (Dict): Demisto params. Returns: - (str): 'ok' if test passed - raises DemistoException if something had failed the test. """ try: ctx = extract_context_data(get_integration_context(), include_id=True) print_mirror_events_stats(ctx, "Test Module") is_long_running = params.get('longRunning') if is_long_running: validate_long_running_params(params) ip_enrich, asset_enrich = get_offense_enrichment(params.get('enrichment', 'IPs And Assets')) # Try to retrieve the last successfully retrieved offense last_highest_id = max(ctx.get(LAST_FETCH_KEY, 0) - 1, 0) get_incidents_long_running_execution( client=client, offenses_per_fetch=1, user_query=params.get('query', ''), fetch_mode=params.get('fetch_mode', ''), events_columns=params.get('events_columns', ''), events_limit=0, ip_enrich=ip_enrich, asset_enrich=asset_enrich, last_highest_id=last_highest_id, incident_type=params.get('incident_type'), mirror_direction=MIRROR_DIRECTION.get(params.get('mirror_options', DEFAULT_MIRRORING_DIRECTION)) ) else: client.offenses_list(range_="items=0-0") message = 'ok' except DemistoException as e: err_msg = str(e) if 'unauthorized to access the requested resource' in err_msg or 'No SEC header present in request' in err_msg: message = 'Authorization Error: make sure credentials are correct.' else: raise e return message def fetch_incidents_command() -> List[Dict]: """ Fetch incidents implemented, for mapping purposes only. Returns list of samples saved by long running execution. Returns: (List[Dict]): List of incidents samples. 
    """
    ctx = get_integration_context()
    # 'samples' are incidents stored by the long-running loop; XSOAR uses them for mapping purposes.
    return extract_context_data(ctx).get('samples', [])


def create_search_with_retry(client: Client, fetch_mode: str, offense: Dict, event_columns: str, events_limit: int,
                             max_retries: int = EVENTS_FAILURE_LIMIT) -> Optional[Dict]:
    """
    Creates a search to retrieve events for an offense.
    Has retry mechanism, because QRadar service tends to return random errors when it is loaded.
    Therefore, 'max_retries' retries will be made, to try avoid such cases as much as possible.

    Args:
        client (Client): Client to perform the API calls.
        fetch_mode (str): Which enrichment mode was requested.
                          Can be 'Fetch With All Events', 'Fetch Correlation Events Only'
        offense (Dict): Offense to enrich with events (its 'id' and 'start_time' fields are used).
        event_columns (str): Columns of the events to be extracted from query.
        events_limit (int): Maximum number of events to enrich the offense.
        max_retries (int): Number of retries.

    Returns:
        (Dict): Search creation response, if the search was created successfully.
        None: If the number of retries exceeded the limit.
    """
    # Restrict the AQL query to correlation (CRE) events only when that fetch mode was chosen.
    additional_where = ''' AND LOGSOURCETYPENAME(devicetype) = 'Custom Rule Engine' ''' \
        if fetch_mode == FetchMode.correlations_events_only.value else ''
    # Decrease 1 minute from start_time to avoid the case where the minute queried of start_time equals end_time.
offense_start_time = offense['start_time'] - 60 * 1000 offense_id = offense['id'] query_expression = ( f'SELECT {event_columns} FROM events WHERE INOFFENSE({offense_id}) {additional_where} limit {events_limit} ' f'START {offense_start_time}' ) print_debug_msg(f'Trying to get events for offense ID: {offense_id}, ' f'offense_start_time: {offense_start_time}, ' f'additional_where: {additional_where}, ' f'events_limit: {events_limit}.') num_of_failures = 0 while num_of_failures <= max_retries: try: print_debug_msg(f'Creating search for offense ID: {offense_id}, ' f'query_expression: {query_expression}.') ret_value = client.search_create(query_expression=query_expression) print_debug_msg(f'Created search for offense ID: {offense_id}, ' f'offense_start_time: {offense_start_time}, ' f'additional_where: {additional_where}, ' f'events_limit: {events_limit}, ' f'ret_value: {ret_value}.') return ret_value except Exception: print_debug_msg(f'Failed to create search for offense ID: {offense_id}. ' f'Retry number {num_of_failures}/{max_retries}.') print_debug_msg(traceback.format_exc()) num_of_failures += 1 if num_of_failures == max_retries: print_debug_msg(f'Max retries for creating search for offense: {offense_id}. Returning empty.') break time.sleep(FAILURE_SLEEP) print_debug_msg(f'Returning empty events for offense ID: {offense_id}.') return None def poll_offense_events_with_retry(client: Client, search_id: str, offense_id: int, max_retries: int = EVENTS_FAILURE_LIMIT) -> Tuple[List[Dict], str]: """ Polls QRadar service for search ID given until status returned is within '{'CANCELED', 'ERROR', 'COMPLETED'}'. Afterwards, performs a call to retrieve the events returned by the search. Has retry mechanism, because QRadar service tends to return random errors when it is loaded. Therefore, 'max_retries' retries will be made, to try avoid such cases as much as possible. Args: client (Client): Client to perform the API calls. search_id (str): ID of the search to poll for its status. 
offense_id (int): ID of the offense to enrich with events returned by search. Used for logging purposes here. max_retries (int): Number of retries. Returns: (List[Dict], str): List of events returned by query. Returns empty list if number of retries exceeded limit, A failure message in case an error occured. """ num_of_failures = 0 start_time = time.time() failure_message = '' while num_of_failures <= max_retries: try: print_debug_msg(f"Getting search status for {search_id}") search_status_response = client.search_status_get(search_id) print_debug_msg(f"Got search status for {search_id}") query_status = search_status_response.get('status') # failures are relevant only when consecutive num_of_failures = 0 print_debug_msg(f'Search query_status: {query_status}') # Possible values for query_status: {'CANCELED', 'ERROR', 'COMPLETED'} # Don't try to get events if CANCELLED or ERROR if query_status in {'CANCELED', 'ERROR'}: if failure_message == '': failure_message = f'query_status is {query_status}' return [], failure_message elif query_status == 'COMPLETED': print_debug_msg(f'Getting events for offense {offense_id}') search_results_response = client.search_results_get(search_id) print_debug_msg(f'Http response: {search_results_response.get("http_response", "Not specified - ok")}') events = search_results_response.get('events', []) sanitized_events = sanitize_outputs(events) print_debug_msg(f'Fetched {len(sanitized_events)} events for offense {offense_id}.') return sanitized_events, failure_message elapsed = time.time() - start_time if elapsed >= FETCH_SLEEP: # print status debug every fetch sleep (or after) print_debug_msg(f'Still fetching offense {offense_id} events, search_id: {search_id}.') start_time = time.time() time.sleep(EVENTS_INTERVAL_SECS) except Exception as e: print_debug_msg( f'Error while fetching offense {offense_id} events, search_id: {search_id}. 
Error details: {str(e)} \n' f'{traceback.format_exc()}') num_of_failures += 1 if num_of_failures < max_retries: time.sleep(FAILURE_SLEEP) else: failure_message = f'{repr(e)} \nSee logs for further details.' print_debug_msg(f'Could not fetch events for offense ID: {offense_id}, returning empty events array.') return [], failure_message def enrich_offense_with_events(client: Client, offense: Dict, fetch_mode: str, events_columns: str, events_limit: int, max_retries: int = MAX_FETCH_EVENT_RETIRES): """ Enriches offense given with events. Has retry mechanism for events returned by query to QRadar. This is needed because events might not be indexed when performing the search, and QRadar will return less events than expected. Retry mechanism here meant to avoid such cases as much as possible Args: client (Client): Client to perform the API calls. offense (Dict): Offense to enrich with events. fetch_mode (str): Which enrichment mode was requested. Can be 'Fetch With All Events', 'Fetch Correlation Events Only' events_columns (str): Columns of the events to be extracted from query. events_limit (int): Maximum number of events to enrich the offense. max_retries (int): Number of retries. Returns: (Dict): Enriched offense with events. """ failure_message = '' events: List[dict] = [] min_events_size = min(offense.get('event_count', 0), events_limit) # decreasing 1 minute from the start_time to avoid the case where the minute queried of start_time equals end_time. 
for i in range(max_retries): # retry to check if we got all the event (its not an error retry), see docstring search_response = create_search_with_retry(client, fetch_mode, offense, events_columns, events_limit) if not search_response: continue offense_id = offense['id'] events, failure_message = poll_offense_events_with_retry(client, search_response['search_id'], offense_id) print_debug_msg(f"Polled events for offense ID {offense_id}") if len(events) >= min_events_size: print_debug_msg(f"Fetched {len(events)}/{min_events_size} for offense ID {offense_id}") break print_debug_msg(f'Did not fetch enough events. Expected at least {min_events_size}. Retrying to fetch events ' f'for offense ID: {offense_id}. Retry number {i}/{max_retries}') if i < max_retries - 1: time.sleep(SLEEP_FETCH_EVENT_RETIRES) print_debug_msg(f"Reached max retries for offense {offense.get('id')} with failure message {failure_message}") if failure_message == '' and len(events) < min_events_size: failure_message = 'Events were probably not indexed in QRadar at the time of the mirror.' offense = dict(offense, mirroring_events_message=failure_message) if events: offense = dict(offense, events=events) return offense def get_incidents_long_running_execution(client: Client, offenses_per_fetch: int, user_query: str, fetch_mode: str, events_columns: str, events_limit: int, ip_enrich: bool, asset_enrich: bool, last_highest_id: int, incident_type: Optional[str], mirror_direction: Optional[str]) -> Tuple[Optional[List[Dict]], Optional[int]]: """ Gets offenses from QRadar service, and transforms them to incidents in a long running execution. Args: client (Client): Client to perform the API calls. offenses_per_fetch (int): Maximum number of offenses to be fetched. user_query (str): If given, the user filters for fetching offenses from QRadar service. fetch_mode (str): Fetch mode of the offenses. 
Can be 'Fetch Without Events', 'Fetch With All Events', 'Fetch Correlation Events Only' events_columns (str): Events columns to extract by search query for each offense. Only used when fetch mode is not 'Fetch Without Events'. events_limit (int): Number of events to be fetched for each offense. Only used when fetch mode is not 'Fetch Without Events'. ip_enrich (bool): Whether to enrich offense by changing IP IDs of each offense to its IP value. asset_enrich (bool): Whether to enrich offense with assets last_highest_id (int): The highest ID of all the offenses that have been fetched from QRadar service. incident_type (Optional[str]): Incident type. mirror_direction (Optional[str]): Whether mirror in is activated or not. Returns: (List[Dict], int): List of the incidents, and the new highest ID for next fetch. (None, None): if reset was triggered """ offense_highest_id = get_minimum_id_to_fetch(last_highest_id, user_query) user_query = f' AND {user_query}' if user_query else '' filter_fetch_query = f'id>{offense_highest_id}{user_query}' print_debug_msg(f'Filter query to QRadar: {filter_fetch_query}') range_max = offenses_per_fetch - 1 if offenses_per_fetch else MAXIMUM_OFFENSES_PER_FETCH - 1 range_ = f'items=0-{range_max}' # if it fails here we can't recover, retry again later raw_offenses = client.offenses_list(range_, filter_=filter_fetch_query, sort=ASCENDING_ID_ORDER) if raw_offenses: raw_offenses_len = len(raw_offenses) print_debug_msg(f'raw_offenses size: {raw_offenses_len}') else: print_debug_msg('empty raw_offenses') new_highest_offense_id = raw_offenses[-1].get('id') if raw_offenses else offense_highest_id print_debug_msg(f'New highest ID returned from QRadar offenses: {new_highest_offense_id}') offenses = [] if fetch_mode != FetchMode.no_events.value: try: futures = [] for offense in raw_offenses: futures.append(EXECUTOR.submit( enrich_offense_with_events, client=client, offense=offense, fetch_mode=fetch_mode, events_columns=events_columns, 
events_limit=events_limit, )) offenses = [future.result(timeout=DEFAULT_EVENTS_TIMEOUT * 60) for future in futures] except concurrent.futures.TimeoutError as e: demisto.error( f"Error while enriching mirrored offenses with events: {str(e)} \n {traceback.format_exc()}") update_missing_offenses_from_raw_offenses(raw_offenses, offenses) else: offenses = raw_offenses if is_reset_triggered(): return None, None offenses_with_mirror = [ dict(offense, mirror_direction=mirror_direction, mirror_instance=demisto.integrationInstance()) for offense in offenses] if mirror_direction else offenses enriched_offenses = enrich_offenses_result(client, offenses_with_mirror, ip_enrich, asset_enrich) final_offenses = sanitize_outputs(enriched_offenses) incidents = create_incidents_from_offenses(final_offenses, incident_type) return incidents, new_highest_offense_id def update_missing_offenses_from_raw_offenses(raw_offenses: list, offenses: list): """ Populate offenses with missing offenses """ offenses_ids = {offense['id'] for offense in raw_offenses} or set() updated_offenses_ids = {offense['id'] for offense in offenses} or set() missing_ids = offenses_ids - updated_offenses_ids if missing_ids: for offense in raw_offenses: if offense['id'] in missing_ids: offenses.append(offense) def exclude_lists(original: List[dict], exclude: List[dict], key: str): """Exclude nodes of exclude list from the original list by key Args: original: The original list to exclude from exclude: The list of nodes to exclude key: The key to exclude by Returns: A list with the original nodes that were not excluded. """ exclude_keys = [excluded_node.get(key) for excluded_node in exclude] return [element.copy() for element in original if element.get(key) not in exclude_keys] def update_mirrored_events(client: Client, fetch_mode: str, events_columns: str, events_limit: int, context_data: dict, offenses_per_fetch: int) -> list: """Update mirrored offenses' events assuming a long running container. 
Args: client: Client to perform the API calls. fetch_mode: Bring correlated / not correlated events. events_columns: Events columns to extract by search query for each offense. events_limit: Number of events to be fetched for each offense. context_data: The integration's current context data. Extract the relevant offenses to update from it. offenses_per_fetch: The number of offenses to fetch. Returns: (A list of updated offenses with their events) """ offenses = context_data.get(MIRRORED_OFFENSES_CTX_KEY, []) if len(offenses) > offenses_per_fetch: offenses = offenses[:offenses_per_fetch] updated_offenses = [] try: if len(offenses) > 0: futures = [] for offense in offenses: print_debug_msg(f"Updating events in offense: {offense.get('id')}") futures.append(EXECUTOR.submit( enrich_offense_with_events, client=client, offense=offense, fetch_mode=fetch_mode, events_columns=events_columns, events_limit=events_limit, )) updated_offenses += [future.result(timeout=DEFAULT_EVENTS_TIMEOUT * 60) for future in futures] except Exception as e: print_debug_msg(f"Error while enriching mirrored offenses with events: {str(e)} \n {traceback.format_exc()}") update_missing_offenses_from_raw_offenses(offenses, updated_offenses) finally: return updated_offenses def create_incidents_from_offenses(offenses: List[Dict], incident_type: Optional[str]) -> List[Dict]: """ Transforms list of offenses given into incidents for Demisto. Args: offenses (List[Dict]): List of the offenses to transform into incidents. incident_type (Optional[str]): Incident type to be used for each incident. Returns: (List[Dict]): Incidents list. 
    """
    print_debug_msg(f'Creating {len(offenses)} incidents')
    return [{
        'name': f'''{offense.get('id')} {offense.get('description', '')}''',
        'rawJSON': json.dumps(offense),
        'occurred': get_time_parameter(offense.get('start_time'), iso_format=True),
        'type': incident_type
    } for offense in offenses]


def print_mirror_events_stats(context_data: dict, stage: str) -> Set[str]:
    """Print debug message with information about mirroring events.

    Args:
        context_data: The integration context data.
        stage: A prefix for the debug message.

    Returns:
        The ids of the mirrored offenses being currently processed.
    """
    if not context_data:
        # Nothing to report on an empty/missing context.
        print_debug_msg("Not printing stats")
        return set()

    updated = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, [])
    waiting_for_update = context_data.get(MIRRORED_OFFENSES_CTX_KEY, [])
    resubmitted_ids = context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, [])
    last_fetch_key = context_data.get(LAST_FETCH_KEY, 'Missing')
    last_mirror_update = context_data.get('last_mirror_update', 0)
    samples = context_data.get('samples', [])
    sample_length = 0
    if samples:
        # Size of the first stored sample incident, as a rough indicator.
        sample_length = len(samples[0])
    not_updated_ids = [str(offense.get('id')) for offense in waiting_for_update]
    # One (id, number of fetched events) pair per already-updated offense.
    stats = [(str(offense.get('id')), len(offense.get('events', []))) for offense in updated]
    print_debug_msg(f"Mirror Events Stats: {stage}\n Updated Offenses (id, len(events)): {stats}"
                    f"\n Offenses ids waiting for update: {not_updated_ids}"
                    f"\n Resubmitted offenses: {resubmitted_ids}"
                    f"\n Last Fetch Key {last_fetch_key}, Last mirror update {last_mirror_update}, "
                    f"sample length {sample_length}")
    updated_ids = [offense_id for offense_id, events_num in stats]
    return set(not_updated_ids + updated_ids + resubmitted_ids)


@safely_update_context_data
def move_updated_offenses(context_data: dict, version: Any, include_context_data: dict,
                          updated_list: list) -> Tuple[dict, Any, Any]:
    """Move updated offenses from MIRRORED_OFFENSES_CTX_KEY to UPDATED_MIRRORED_OFFENSES_CTX_KEY.
Args: context_data: The context data to update version: The version of the context data include_context_data: The context data changes to include updated_list: The list of updated offenses Returns: (The new context data, the context data version the changes were based on, The new context_data) """ new_context_data = include_context_data.copy() if updated_list: all_updated_mirrored_offenses = merge_lists( original_list=context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, []), updated_list=updated_list, key='id') not_updated_list = exclude_lists(original=context_data.get(MIRRORED_OFFENSES_CTX_KEY, []), exclude=updated_list, key="id") new_context_data.update({UPDATED_MIRRORED_OFFENSES_CTX_KEY: all_updated_mirrored_offenses, MIRRORED_OFFENSES_CTX_KEY: not_updated_list, RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY: context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, [])}) # type: ignore else: new_context_data.update( {UPDATED_MIRRORED_OFFENSES_CTX_KEY: context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, []), MIRRORED_OFFENSES_CTX_KEY: context_data.get(MIRRORED_OFFENSES_CTX_KEY, []), RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY: context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, [])}) if not new_context_data.get('samples'): new_context_data.update({'samples': context_data.get('samples')}) if not new_context_data.get('last_mirror_update'): new_context_data.update({'last_mirror_update': str(context_data.get('last_mirror_update', 0))}) return encode_context_data(new_context_data, include_id=True), version, new_context_data def perform_long_running_loop(client: Client, offenses_per_fetch: int, fetch_mode: str, mirror_options: str, user_query: str, events_columns: str, events_limit: int, ip_enrich: bool, asset_enrich: bool, incident_type: Optional[str], mirror_direction: Optional[str]): is_reset_triggered() ctx, ctx_version = get_integration_context_with_version() print_debug_msg(f'Starting fetch loop. 
Fetch mode: {fetch_mode}, Mirror option: {mirror_options}.') incidents, new_highest_id = get_incidents_long_running_execution( client=client, offenses_per_fetch=offenses_per_fetch, user_query=user_query, fetch_mode=fetch_mode, events_columns=events_columns, events_limit=events_limit, ip_enrich=ip_enrich, asset_enrich=asset_enrich, last_highest_id=int(json.loads(ctx.get(LAST_FETCH_KEY, '0'))), incident_type=incident_type, mirror_direction=mirror_direction ) orig_context_data = extract_context_data(ctx.copy(), include_id=True) context_data = {LAST_FETCH_KEY: orig_context_data.get(LAST_FETCH_KEY, 0)} updated_mirrored_offenses = None ctx = extract_context_data(ctx) if mirror_options == MIRROR_OFFENSE_AND_EVENTS: print_mirror_events_stats(ctx, "Long Running Command - Before Update") updated_mirrored_offenses = update_mirrored_events(client=client, fetch_mode=fetch_mode, events_columns=events_columns, events_limit=events_limit, context_data=ctx, offenses_per_fetch=offenses_per_fetch) if incidents and new_highest_id: incident_batch_for_sample = incidents[:SAMPLE_SIZE] if incidents else ctx.get('samples', []) if incident_batch_for_sample: print_debug_msg(f'Saving New Highest ID: {new_highest_id}') context_data.update({'samples': incident_batch_for_sample, LAST_FETCH_KEY: int(new_highest_id)}) # if incident creation fails, it'll drop the data and try again in the next iteration demisto.createIncidents(incidents) new_context_data = move_updated_offenses(context_data=ctx, version=ctx_version, include_context_data=context_data, updated_list=updated_mirrored_offenses) print_mirror_events_stats(new_context_data, "Long Running Command - After Update") def long_running_execution_command(client: Client, params: Dict): """ Long running execution of fetching incidents from QRadar service. Will continue to fetch in an infinite loop offenses from QRadar, Enriching each offense with events/IPs/assets according to the configurations given in Demisto params. 
transforming the offenses into incidents and sending them to Demisto to save the incidents. Args: client (Client): Client to perform API calls. params (Dict): Demisto params. """ validate_long_running_params(params) fetch_mode = params.get('fetch_mode', '') ip_enrich, asset_enrich = get_offense_enrichment(params.get('enrichment', 'IPs And Assets')) offenses_per_fetch = int(params.get('offenses_per_fetch')) # type: ignore user_query = params.get('query', '') events_columns = params.get('events_columns', '') events_limit = int(params.get('events_limit') or DEFAULT_EVENTS_LIMIT) incident_type = params.get('incident_type') mirror_options = params.get('mirror_options', DEFAULT_MIRRORING_DIRECTION) mirror_direction = MIRROR_DIRECTION.get(mirror_options) reset_mirroring_vars = False while not reset_mirroring_vars: try: reset_mirroring_events_variables(mirror_options) reset_mirroring_vars = True except Exception as e: print_debug_msg( f'Error while reseting mirroring variables, retring. Error details: {str(e)} \n' f'{traceback.format_exc()}') demisto.error('Exception when calling reset_mirroring_events_variables') raise e while True: try: perform_long_running_loop( client=client, offenses_per_fetch=offenses_per_fetch, fetch_mode=fetch_mode, mirror_options=mirror_options, user_query=user_query, events_columns=events_columns, events_limit=events_limit, ip_enrich=ip_enrich, asset_enrich=asset_enrich, incident_type=incident_type, mirror_direction=mirror_direction ) demisto.updateModuleHealth('') except Exception as e: msg = f'Error occurred during long running loop: {e}' demisto.updateModuleHealth(msg) demisto.error(msg) demisto.error(traceback.format_exc()) finally: print_debug_msg('Finished fetch loop') time.sleep(FETCH_SLEEP) def qradar_offenses_list_command(client: Client, args: Dict) -> CommandResults: """ Retrieves list of offenses from QRadar service. possible arguments: - offense_id: Retrieves details of the specific offense that corresponds to the ID given. 
- range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3). - filter: Query filter to filter results returned by QRadar service. see https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html for more details. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. """ offense_id = args.get('offense_id') range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' filter_ = args.get('filter') fields = args.get('fields') ip_enrich, asset_enrich = get_offense_enrichment(args.get('enrichment', 'None')) # if this call fails, raise an error and stop command execution response = client.offenses_list(range_, offense_id, filter_, fields) enriched_outputs = enrich_offenses_result(client, response, ip_enrich, asset_enrich) final_outputs = sanitize_outputs(enriched_outputs, OFFENSE_OLD_NEW_NAMES_MAP) headers = build_headers(['ID', 'Description', 'OffenseType', 'Status', 'Severity'], set(OFFENSE_OLD_NEW_NAMES_MAP.values())) return CommandResults( readable_output=tableToMarkdown('Offenses List', final_outputs, headers=headers, removeNull=True), outputs_prefix='QRadar.Offense', outputs_key_field='ID', outputs=final_outputs, raw_response=response ) def qradar_offense_update_command(client: Client, args: Dict) -> CommandResults: """ Updates offense that corresponds to the given offense ID. possible arguments: - offense_id (Required): Update offense that corresponds to ID given. - protected: Whether the offense is protected. - follow_up: Whether the offense should be marked for follow up. - status: Status of the offense. One of 'OPEN', 'HIDDEN', 'CLOSED'. - closing_reason_id: The ID of the reason the offense was closed. 
full list of closing reason IDs, full list of closing reason IDs can be retrieved by 'qradar-closing-reasons' command. - assigned_to: The user whom to assign the offense to. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. """ offense_id: int = int(args['offense_id']) protected = args.get('protected') follow_up = args.get('follow_up') closing_reason_name = args.get('closing_reason_name') status = args.get('status') closing_reason_id = args.get('closing_reason_id') if status == 'CLOSED' and (not closing_reason_id and not closing_reason_name): raise DemistoException( '''Closing reason ID must be provided when closing an offense. Available closing reasons can be achieved by 'qradar-closing-reasons' command.''' ) if closing_reason_name: # if this call fails, raise an error and stop command execution closing_reasons_list = client.closing_reasons_list(include_deleted=True, include_reserved=True) for closing_reason in closing_reasons_list: if closing_reason.get('text') == closing_reason_name: closing_reason_id = closing_reason.get('id') if not closing_reason_id: raise DemistoException(f'Could not find closing reason name {closing_reason_name}. Please provide a valid' ' closing reason name. 
Closing reasons can be retrieved by running the ' 'qradar-closing-reasons command.') assigned_to = args.get('assigned_to') fields = args.get('fields') ip_enrich, asset_enrich = get_offense_enrichment(args.get('enrichment', 'None')) # if this call fails, raise an error and stop command execution response = client.offense_update(offense_id, protected, follow_up, status, closing_reason_id, assigned_to, fields) enriched_outputs = enrich_offenses_result(client, response, ip_enrich, asset_enrich) final_outputs = sanitize_outputs(enriched_outputs, OFFENSE_OLD_NEW_NAMES_MAP) headers = build_headers(['ID', 'Description', 'OffenseType', 'Status', 'Severity'], set(OFFENSE_OLD_NEW_NAMES_MAP.values())) return CommandResults( readable_output=tableToMarkdown('offense Update', final_outputs, headers, removeNull=True), outputs_prefix='QRadar.Offense', outputs_key_field='ID', outputs=final_outputs, raw_response=response ) def qradar_closing_reasons_list_command(client: Client, args: Dict) -> CommandResults: """ Retrieves list of closing reasons from QRadar service. possible arguments: - closing_reason_id: Retrieves details of the specific closing reason that corresponds to the ID given. - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3). - filter: Query filter to filter results returned by QRadar service. see https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html for more details. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. 
""" closing_reason_id = args.get('closing_reason_id') include_reserved = argToBoolean(args.get('include_reserved', False)) include_deleted = argToBoolean(args.get('include_deleted', False)) range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' filter_ = args.get('filter') fields = args.get('fields') # if this call fails, raise an error and stop command execution response = client.closing_reasons_list(closing_reason_id, include_reserved, include_deleted, range_, filter_, fields) outputs = sanitize_outputs(response, CLOSING_REASONS_OLD_NEW_MAP) headers = build_headers(['ID', 'Name'], set(CLOSING_REASONS_OLD_NEW_MAP.values())) return CommandResults( readable_output=tableToMarkdown('Closing Reasons', outputs, headers=headers, removeNull=True), outputs_prefix='QRadar.Offense.ClosingReasons', outputs_key_field='ID', outputs=outputs, raw_response=response ) def qradar_offense_notes_list_command(client: Client, args: Dict) -> CommandResults: """ Retrieves list of notes corresponding to the ID of the offense ID given from QRadar service. possible arguments: - offense_id: The offense ID to retrieve the notes for. - note_id: The note ID to its details. - range: Range of notes to return for the offense corresponding to the offense ID (e.g.: 0-20, 3-5, 3-3). - filter: Query filter to filter results returned by QRadar service. see https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html for more details. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. 
""" offense_id: int = int(args['offense_id']) note_id = args.get('note_id') range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' filter_ = args.get('filter') fields = args.get('fields') # if this call fails, raise an error and stop command execution response = client.offense_notes_list(offense_id, range_, note_id, filter_, fields) outputs = sanitize_outputs(response, NOTES_OLD_NEW_MAP) headers = build_headers(['ID', 'Text', 'CreatedBy', 'CreateTime'], set(NOTES_OLD_NEW_MAP.values())) return CommandResults( readable_output=tableToMarkdown(f'Offense Notes List For Offense ID {offense_id}', outputs, headers, removeNull=True), outputs_prefix='QRadar.Note', outputs_key_field='ID', outputs=outputs, raw_response=response ) def qradar_offense_notes_create_command(client: Client, args: Dict) -> CommandResults: """ Create a new note for the offense corresponding to the given offense ID with the note text given to QRadar service. possible arguments: - offense_id: The offense ID to add note to. - note_text: The note text. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. 
""" offense_id: int = int(args['offense_id']) note_text: str = args.get('note_text', '') fields = args.get('fields') # if this call fails, raise an error and stop command execution response = client.offense_notes_create(offense_id, note_text, fields) outputs = sanitize_outputs(response, NOTES_OLD_NEW_MAP) headers = build_headers(['ID', 'Text', 'CreatedBy', 'CreateTime'], set(NOTES_OLD_NEW_MAP.values())) return CommandResults( readable_output=tableToMarkdown('Create Note', outputs, headers, removeNull=True), outputs_prefix='QRadar.Note', outputs_key_field='ID', outputs=outputs, raw_response=response ) def qradar_rules_list_command(client: Client, args: Dict) -> CommandResults: """ Retrieves list of rules from QRadar service. possible arguments: - rule_id: Retrieves details of the specific rule that corresponds to the ID given. - rule_type: Retrieves rules corresponding to the given rule type. - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3). - filter: Query filter to filter results returned by QRadar service. see https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html for more details. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. 
""" rule_id = args.get('rule_id') rule_type = args.get('rule_type') range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' filter_ = args.get('filter') fields = args.get('fields') if not filter_ and rule_type: filter_ = f'type={rule_type}' # if this call fails, raise an error and stop command execution response = client.rules_list(rule_id, range_, filter_, fields) outputs = sanitize_outputs(response, RULES_OLD_NEW_MAP) headers = build_headers(['ID', 'Name', 'Type'], set(RULES_OLD_NEW_MAP.values())) return CommandResults( readable_output=tableToMarkdown('Rules List', outputs, headers=headers, removeNull=True), outputs_prefix='QRadar.Rule', outputs_key_field='ID', outputs=outputs, raw_response=response ) def qradar_rule_groups_list_command(client: Client, args: Dict) -> CommandResults: """ Retrieves list of rule groups from QRadar service. possible arguments: - rule_group_id: Retrieves details of the specific rule group that corresponds to the ID given. - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3). - filter: Query filter to filter results returned by QRadar service. see https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html for more details. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. 
""" rule_group_id = arg_to_number(args.get('rule_group_id')) range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' filter_ = args.get('filter') fields = args.get('fields') # if this call fails, raise an error and stop command execution response = client.rule_groups_list(range_, rule_group_id, filter_, fields) outputs = sanitize_outputs(response, RULES_GROUP_OLD_NEW_MAP) headers = build_headers(['ID', 'Name', 'Description', 'Owner'], set(RULES_GROUP_OLD_NEW_MAP.values())) return CommandResults( readable_output=tableToMarkdown('Rules Group List', outputs, headers, removeNull=True), outputs_prefix='QRadar.RuleGroup', outputs_key_field='ID', outputs=outputs, raw_response=response ) def qradar_assets_list_command(client: Client, args: Dict) -> CommandResults: """ Retrieves list of assets from QRadar service. possible arguments: - asset_id: Retrieves details of the specific asset that corresponds to the ID given. - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3). - filter: Query filter to filter results returned by QRadar service. see https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html for more details. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. """ asset_id = args.get('asset_id') range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' filter_ = args.get('filter') fields = args.get('fields') # If asset ID was given, override filter if both filter and asset ID were given. 
    if asset_id:
        filter_ = f'id={asset_id}'

    # Full enrichment is only performed for a single-asset lookup.
    full_enrichment = True if asset_id else False

    # if this call fails, raise an error and stop command execution
    response = client.assets_list(range_, filter_, fields)
    enriched_outputs = enrich_assets_results(client, response, full_enrichment)
    assets_results = dict()
    assets_hr = []
    endpoints = []
    for output in enriched_outputs:
        output['Asset']['hostnames'] = add_iso_entries_to_dict(output.get('Asset', dict()).get('hostnames', []))
        output['Asset']['users'] = add_iso_entries_to_dict(output.get('Asset', dict()).get('users', []))
        output['Asset']['products'] = add_iso_entries_to_dict(output.get('Asset', dict()).get('products', []))
        output['Asset'] = sanitize_outputs(output.get('Asset'), ASSET_OLD_NEW_MAP)[0]
        assets_hr.append(output['Asset'])
        # Context path is keyed per-asset so repeated runs merge by asset ID.
        assets_results[f'''QRadar.Asset(val.ID === "{output['Asset']['ID']}")'''] = output['Asset']
        sanitized_endpoint = remove_empty_elements(output.get('Endpoint', dict()))
        if sanitized_endpoint:
            endpoints.append(sanitized_endpoint)

    asset_human_readable = tableToMarkdown('Assets List', assets_hr, removeNull=True)
    endpoints_human_readable = tableToMarkdown('Endpoints', endpoints, removeNull=True)

    if endpoints:
        assets_results['Endpoint'] = endpoints

    return CommandResults(
        readable_output=asset_human_readable + endpoints_human_readable,
        outputs=assets_results,
        raw_response=response
    )


def qradar_saved_searches_list_command(client: Client, args: Dict) -> CommandResults:
    """
    Retrieves list of saved searches from QRadar service.
    possible arguments:
    - saved_search_id: Retrieves details of the specific saved search that corresponds to the ID given.
    - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3).
    - filter: Query filter to filter results returned by QRadar service. see
        https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html
        for more details.
    - fields: If used, will filter all fields except for the specified ones.
        Use this parameter to specify which fields you would like to get back in the response. Fields that are not
        explicitly named are excluded.
    Args:
        client (Client): QRadar client to perform the API call.
        args (Dict): Demisto args.

    Returns:
        CommandResults.
    """
    saved_search_id = args.get('saved_search_id')
    timeout: Optional[int] = arg_to_number(args.get('timeout', DEFAULT_TIMEOUT_VALUE))
    range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}'''
    filter_ = args.get('filter')
    fields = args.get('fields')

    # if this call fails, raise an error and stop command execution
    response = client.saved_searches_list(range_, timeout, saved_search_id, filter_, fields)
    outputs = sanitize_outputs(response, SAVED_SEARCH_OLD_NEW_MAP)
    headers = build_headers(['ID', 'Name', 'Description'], set(SAVED_SEARCH_OLD_NEW_MAP.values()))
    return CommandResults(
        readable_output=tableToMarkdown('Saved Searches List', outputs, headers, removeNull=True),
        outputs_prefix='QRadar.SavedSearch',
        outputs_key_field='ID',
        outputs=outputs,
        raw_response=response
    )


def qradar_searches_list_command(client: Client, args: Dict) -> CommandResults:
    """
    Retrieves list of searches IDs from QRadar service.
    possible arguments:
    - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3).
    - filter: Query filter to filter results returned by QRadar service. see
        https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html
        for more details.
    Args:
        client (Client): QRadar client to perform the API call.
        args (Dict): Demisto args.

    Returns:
        CommandResults.
    """
    range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}'''
    filter_ = args.get('filter')

    # if this call fails, raise an error and stop command execution
    response = client.searches_list(range_, filter_)
    outputs = [{'SearchID': search_id} for search_id in response]

    return CommandResults(
        readable_output=tableToMarkdown('Search ID List', outputs),
        outputs_prefix='QRadar.SearchID',
        outputs_key_field='SearchID',
        outputs=outputs,
        raw_response=response
    )


def qradar_search_create_command(client: Client, args: Dict) -> CommandResults:
    """
    Create a search in QRadar service.
    possible arguments:
    - query_expression: The AQL query to execute. Mutually exclusive with saved_search_id.
    - saved_search_id: Saved search ID to execute. Mutually exclusive with query_expression.
    Args:
        client (Client): QRadar client to perform the API call.
        args (Dict): Demisto args.

    Returns:
        CommandResults.
    """
    query_expression = args.get('query_expression')
    saved_search_id = args.get('saved_search_id')

    # if this call fails, raise an error and stop command execution
    response = client.search_create(query_expression, saved_search_id)
    outputs = sanitize_outputs(response, SEARCH_OLD_NEW_MAP)
    return CommandResults(
        readable_output=tableToMarkdown('Create Search', outputs),
        outputs_prefix='QRadar.Search',
        outputs_key_field='ID',
        outputs=outputs,
        raw_response=response
    )


def qradar_search_status_get_command(client: Client, args: Dict) -> CommandResults:
    """
    Retrieves search status from QRadar service.
    possible arguments:
    - search_id (Required): The search ID to retrieve its status.
    Args:
        client (Client): QRadar client to perform the API call.
        args (Dict): Demisto args.

    Returns:
        CommandResults.
""" search_id: str = args.get('search_id', '') # if this call fails, raise an error and stop command execution response = client.search_status_get(search_id) outputs = sanitize_outputs(response, SEARCH_OLD_NEW_MAP) return CommandResults( readable_output=tableToMarkdown(f'Search Status For Search ID {search_id}', outputs), outputs_prefix='QRadar.Search', outputs_key_field='ID', outputs=outputs, raw_response=response ) def qradar_search_results_get_command(client: Client, args: Dict) -> CommandResults: """ Retrieves search results from QRadar service. possible arguments: - search_id: Search ID to retrieve its results. - output_path: If specified, will be context output path prefix. - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3). Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. """ search_id: str = args.get('search_id', '') output_path = args.get('output_path') # Using or instead of default value for QRadarFullSearch backward compatibility range_ = f'''items={args.get('range') or DEFAULT_RANGE_VALUE}''' # if this call fails, raise an error and stop command execution response = client.search_results_get(search_id, range_) if not response: raise DemistoException('Unexpected response from QRadar service.') result_key = list(response.keys())[0] outputs = sanitize_outputs(response.get(result_key)) outputs_prefix = output_path if output_path else f'QRadar.Search(val.ID === "{search_id}").Result.{result_key}' return CommandResults( readable_output=tableToMarkdown(f'Search Results For Search ID {search_id}', outputs), outputs_prefix=outputs_prefix, outputs=outputs, raw_response=response ) def qradar_reference_sets_list_command(client: Client, args: Dict) -> CommandResults: """ Retrieves list of reference sets from QRadar service. possible arguments: - ref_name: Retrieves details of the specific reference that corresponds to the reference name given. 
- range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3). - filter: Query filter to filter results returned by QRadar service. see https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html for more details. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. """ ref_name = args.get('ref_name') convert_date_value = argToBoolean(args.get('date_value', False)) range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' filter_ = args.get('filter') fields = args.get('fields') # if this call fails, raise an error and stop command execution response = client.reference_sets_list(range_, ref_name, filter_, fields) if ref_name: outputs = dict(response) if convert_date_value and outputs.get('element_type') == 'DATE': for data_entry in outputs.get('data', []): data_entry['value'] = get_time_parameter(data_entry.get('value'), iso_format=True) outputs['data'] = sanitize_outputs(outputs.get('data', []), REFERENCE_SET_DATA_OLD_NEW_MAP) else: outputs = response final_outputs = sanitize_outputs(outputs, REFERENCE_SETS_OLD_NEW_MAP) headers = build_headers(['Name', 'ElementType', 'Data', 'TimeToLive', 'TimeoutType'], set(REFERENCE_SETS_OLD_NEW_MAP.values())) return CommandResults( readable_output=tableToMarkdown('Reference Sets List', final_outputs, headers, removeNull=True), outputs_prefix='QRadar.Reference', outputs_key_field='Name', outputs=final_outputs, raw_response=response ) def qradar_reference_set_create_command(client: Client, args: Dict) -> CommandResults: """ Create a new reference set. possible arguments: - ref_name (Required): The name of the new reference set. - element_type (Required): The type of the new reference set. 
        Can be ALN (alphanumeric), ALNIC (alphanumeric ignore case), IP (IP address), NUM (numeric),
        PORT (port number) or DATE.
    - timeout_type: Indicates if the time_to_live interval is based on when the data was first seen or last seen.
        The allowed values are 'FIRST_SEEN', 'LAST_SEEN' and 'UNKNOWN'. The default value is 'UNKNOWN'.
    - time_to_live: The time to live interval, for example: '1 month' or '5 minutes'.
    - fields: If used, will filter all fields except for the specified ones.
        Use this parameter to specify which fields you would like to get back in the response. Fields that are not
        explicitly named are excluded.
    Args:
        client (Client): QRadar client to perform the API call.
        args (Dict): Demisto args.

    Returns:
        CommandResults.
    """
    ref_name: str = args.get('ref_name', '')
    element_type: str = args.get('element_type', '')
    timeout_type = args.get('timeout_type')
    time_to_live = args.get('time_to_live')
    fields = args.get('fields')

    # if this call fails, raise an error and stop command execution
    response = client.reference_set_create(ref_name, element_type, timeout_type, time_to_live, fields)
    outputs = sanitize_outputs(response, REFERENCE_SETS_OLD_NEW_MAP)
    headers = build_headers(['Name', 'ElementType', 'Data', 'TimeToLive', 'TimeoutType'],
                            set(REFERENCE_SETS_OLD_NEW_MAP.values()))
    return CommandResults(
        readable_output=tableToMarkdown('Reference Set Create', outputs, headers, removeNull=True),
        outputs_prefix='QRadar.Reference',
        outputs_key_field='Name',
        outputs=outputs,
        raw_response=response
    )


def qradar_reference_set_delete_command(client: Client, args: Dict) -> CommandResults:
    """
    Removes a reference set or purges its contents.
    possible arguments:
    - ref_name (Required): The name of the new reference set.
    - purge_only: Indicates if the reference set should have its contents purged (true),
        keeping the reference set structure. If the value is 'false',
        or not specified the reference set is removed completely. Default is 'false'.
    - fields: If used, will filter all fields except for the specified ones.
        Use this parameter to specify which fields you would like to get back in the response. Fields that are not
        explicitly named are excluded.
    Args:
        client (Client): QRadar client to perform the API call.
        args (Dict): Demisto args.

    Returns:
        CommandResults.
    """
    ref_name: str = args.get('ref_name', '')
    purge_only = args.get('purge_only')
    fields = args.get('fields')

    # if this call fails, raise an error and stop command execution
    # Deletion is asynchronous on the QRadar side; the response carries the task status.
    response = client.reference_set_delete(ref_name, purge_only, fields)
    return CommandResults(
        raw_response=response,
        readable_output=f'Request to delete reference {ref_name} was submitted.'
                        f''' Current deletion status: {response.get('status', 'Unknown')}''')


def qradar_reference_set_value_upsert_command(client: Client, args: Dict) -> CommandResults:
    """
    Update or insert new value to a reference set from QRadar service.
    possible arguments:
    - ref_name (Required): The reference name to insert/update a value for.
    - value (Required): Comma separated list. All the values to be inserted/updated.
    - source: An indication of where the data originated. Default is reference data api.
    - date_value: Boolean, specifies if values given are dates or not.
    - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3).
    - fields: If used, will filter all fields except for the specified ones.
        Use this parameter to specify which fields you would like to get back in the response. Fields that are not
        explicitly named are excluded.
    Args:
        client (Client): QRadar client to perform the API call.
        args (Dict): Demisto args.

    Returns:
        CommandResults.
""" ref_name: str = args.get('ref_name', '') values: List[str] = argToList(args.get('value', '')) if not values: raise DemistoException('Value to insert must be given.') source = args.get('source') date_value = argToBoolean(args.get('date_value', False)) fields = args.get('fields') if date_value: values = [get_time_parameter(value, epoch_format=True) for value in values] # if one of these calls fail, raise an error and stop command execution if len(values) == 1: response = client.reference_set_value_upsert(ref_name, values[0], source, fields) else: response = client.indicators_upload(ref_name, values, fields) outputs = sanitize_outputs(response, REFERENCE_SETS_OLD_NEW_MAP) return CommandResults( readable_output=tableToMarkdown('Reference Update Create', outputs, ['Name', 'ElementType', 'TimeToLive', 'TimeoutType', 'NumberOfElements', 'CreationTime'], removeNull=True), outputs_prefix='QRadar.Reference', outputs_key_field='Name', outputs=outputs, raw_response=response ) def qradar_reference_set_value_delete_command(client: Client, args: Dict) -> CommandResults: """ Delete a value in reference set from QRadar service. possible arguments: - ref_name (Required): The reference name to insert/update a value for. - value (Required): Value to be deleted. - date_value: Boolean, specifies if values given are dates or not. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. 
""" ref_name: str = args.get('ref_name', '') value: str = args.get('value', '') date_value = argToBoolean(args.get('date_value', False)) original_value = value if date_value: value = get_time_parameter(original_value, epoch_format=True) # if this call fails, raise an error and stop command execution response = client.reference_set_value_delete(ref_name, value) human_readable = f'### value: {original_value} of reference: {ref_name} was deleted successfully' return CommandResults( readable_output=human_readable, raw_response=response ) def qradar_domains_list_command(client: Client, args: Dict) -> CommandResults: """ Retrieves list of domains sets from QRadar service. If you do not have the System Administrator or Security Administrator permissions, then for each domain assigned to your security profile you can only view the values for the id and name fields. All other values return null. possible arguments: - domain_id: Retrieves details of the specific domain that corresponds to the ID given. - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3). - filter: Query filter to filter results returned by QRadar service. see https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html for more details. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. """ # backward compatibility for domain_id argument named is 'id' in QRadar v2. 
    domain_id = args.get('domain_id') or args.get('id')
    range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}'''
    filter_ = args.get('filter')
    fields = args.get('fields')

    # if this call fails, raise an error and stop command execution
    response = client.domains_list(domain_id, range_, filter_, fields)
    outputs = sanitize_outputs(response, DOMAIN_OLD_NEW_MAP)

    return CommandResults(
        readable_output=tableToMarkdown('Domains List', outputs, removeNull=True),
        outputs_prefix='QRadar.Domains',
        outputs_key_field='ID',
        outputs=outputs,
        raw_response=response
    )


def qradar_indicators_upload_command(client: Client, args: Dict) -> CommandResults:
    """
    Uploads list of indicators from Demisto to a reference set in QRadar service.
    possible arguments:
    - ref_name (Required): Name of the reference set to upload indicators to.
    - query: The query for getting indicators from Demisto.
    - limit: Maximum number of indicators to fetch from Demisto.
    - page: The page from which to get the indicators.
    - fields: If used, will filter all fields except for the specified ones.
        Use this parameter to specify which fields you would like to get back in the response. Fields that are not
        explicitly named are excluded.
    Args:
        client (Client): QRadar client to perform the API call.
        args (Dict): Demisto args.

    Returns:
        CommandResults.
    """
    ref_name: str = args.get('ref_name', '')
    query = args.get('query')
    limit = arg_to_number(args.get('limit', DEFAULT_LIMIT_VALUE))
    page = arg_to_number(args.get('page', 0))
    fields = args.get('fields')

    # Backward compatibility for QRadar V2 command. Create reference set for given 'ref_name' if does not exist.
    element_type = args.get('element_type', '')
    timeout_type = args.get('timeout_type')
    time_to_live = args.get('time_to_live')
    try:
        client.reference_sets_list(ref_name=ref_name)
    except DemistoException as e:
        # Create reference set if does not exist
        # The existence check relies on the service error message containing the set name.
        if e.message and f'{ref_name} does not exist' in e.message:
            # if this call fails, raise an error and stop command execution
            client.reference_set_create(ref_name, element_type, timeout_type, time_to_live)
        else:
            raise e

    search_indicators = IndicatorsSearcher(page=page)
    indicators = search_indicators.search_indicators_by_version(query=query, size=limit).get('iocs', [])
    # Only indicators that carry both a value and a type are uploaded.
    indicators_data = [{'Indicator Value': indicator.get('value'), 'Indicator Type': indicator.get('indicator_type')}
                       for indicator in indicators if 'value' in indicator and 'indicator_type' in indicator]
    indicator_values: List[Any] = [indicator.get('Indicator Value') for indicator in indicators_data]

    if not indicators_data:
        return CommandResults(
            readable_output=f'No indicators were found for reference set {ref_name}'
        )

    # if this call fails, raise an error and stop command execution
    response = client.indicators_upload(ref_name, indicator_values, fields)
    outputs = sanitize_outputs(response)

    reference_set_hr = tableToMarkdown(f'Indicators Upload For Reference Set {ref_name}', outputs)
    indicators_uploaded_hr = tableToMarkdown('Indicators Uploaded', indicators_data)

    return CommandResults(
        readable_output=f'{reference_set_hr}\n{indicators_uploaded_hr}',
        outputs_prefix='QRadar.Reference',
        outputs_key_field='name',
        outputs=outputs,
        raw_response=response
    )


def flatten_nested_geolocation_values(geolocation_dict: Dict, dict_key: str,
                                      nested_value_keys: List[str]) -> Dict:
    """
    Receives output from geolocation IPs command, and does:
    1) flattens output, takes nested keys values.
    2) Converts keys to prefix of 'dict_key' and suffix of nested key as camel case.
    Args:
        geolocation_dict (Dict): The dict to flatten.
        dict_key (Dict): The key of the inner dict to use his values.
        nested_value_keys (Dict): The keys inside inner dict to take.

    Returns:
        (Dict): dict of ({dict_key_name}{camel case nested key}: {nested key value}
    """
    return {f'{camelize_string(dict_key)}{camelize_string(k)}': geolocation_dict.get(dict_key, dict()).get(k) for k in
            nested_value_keys}


def qradar_geolocations_for_ip_command(client: Client, args: Dict) -> CommandResults:
    """
    Retrieves the MaxMind geoip data for the given IP addresses.
    possible arguments:
    - ip (Required): Comma separated list. the IPs to retrieve data for.
    - fields: If used, will filter all fields except for the specified ones.
        Use this parameter to specify which fields you would like to get back in the response. Fields that are not
        explicitly named are excluded.
    Args:
        client (Client): QRadar client to perform the API call.
        args (Dict): Demisto args.

    Returns:
        CommandResults.
    """
    ips = argToList(args.get('ip'))
    # Build a QRadar 'IN' filter with every requested IP quoted.
    filter_ = f'''ip_address IN ({','.join(map(lambda ip: f'"{str(ip)}"', ips))})'''
    fields = args.get('fields')

    # if this call fails, raise an error and stop command execution
    response = client.geolocations_for_ip(filter_, fields)
    outputs = []
    for output in response:
        city_values = flatten_nested_geolocation_values(output, 'city', ['name'])
        continent_values = flatten_nested_geolocation_values(output, 'continent', ['name'])
        location_values = flatten_nested_geolocation_values(output, 'location',
                                                            ['accuracy_radius', 'average_income', 'latitude',
                                                             'longitude', 'metro_code', 'population_density',
                                                             'timezone'])
        physical_country_values = flatten_nested_geolocation_values(output, 'physical_country', ['iso_code', 'name'])
        registered_country_values = flatten_nested_geolocation_values(output, 'registered_country',
                                                                      ['iso_code', 'name'])
        represented_country_values = flatten_nested_geolocation_values(output, 'represented_country',
                                                                       ['iso_code', 'name', 'confidence'])
        subdivision_values = flatten_nested_geolocation_values(output, 'subdivision',
                                                               ['name', 'iso_code', 'confidence'])
        non_nested_values = {
            'IPAddress': output.get('ip_address'),
            'Traits': output.get('traits'),
            'Coordinates': output.get('geo_json', dict()).get('coordinates'),
            'PostalCode': output.get('postal', dict()).get('postal_code'),
            'PostalCodeConfidence': output.get('postal', dict()).get('confidence')
        }
        # Merge all flattened sections into a single flat record per IP.
        final_output = dict(city_values, **continent_values, **location_values, **physical_country_values,
                            **registered_country_values, **represented_country_values, **subdivision_values,
                            **non_nested_values)
        outputs.append(final_output)

    final_outputs = sanitize_outputs(outputs)

    return CommandResults(
        readable_output=tableToMarkdown('Geolocation For IP', final_outputs),
        outputs_prefix='QRadar.GeoForIP',
        outputs_key_field='IPAddress',
        outputs=final_outputs,
        raw_response=response
    )


def qradar_log_sources_list_command(client: Client, args: Dict) -> CommandResults:
    """
    Retrieves a list of log sources from QRadar service.
    possible arguments:
    - qrd_encryption_algorithm: The algorithm to use for encrypting the sensitive data of this endpoint. Using AES 128
    - qrd_encryption_password: The password to use for encrypting the sensitive data of this endpoint.
        If argument was not given, will be randomly generated.
    - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3).
    - filter: Query filter to filter results returned by QRadar service. see
        https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html
        for more details.
    - fields: If used, will filter all fields except for the specified ones.
        Use this parameter to specify which fields you would like to get back in the response. Fields that are not
        explicitly named are excluded.
    Args:
        client (Client): QRadar client to perform the API call.
        args (Dict): Demisto args.

    Returns:
        CommandResults.
""" qrd_encryption_algorithm: str = args.get('qrd_encryption_algorithm', 'AES128') qrd_encryption_password: str = args.get('qrd_encryption_password', secrets.token_urlsafe(20)) range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' filter_ = args.get('filter') fields = args.get('fields') # if this call fails, raise an error and stop command execution response = client.log_sources_list(qrd_encryption_algorithm, qrd_encryption_password, range_, filter_, fields) outputs = sanitize_outputs(response, LOG_SOURCES_OLD_NEW_MAP) headers = build_headers(['ID', 'Name', 'Description'], set(LOG_SOURCES_OLD_NEW_MAP.values())) return CommandResults( readable_output=tableToMarkdown('Log Sources List', outputs, headers, removeNull=True), outputs_prefix='QRadar.LogSource', outputs_key_field='ID', outputs=outputs, raw_response=response ) def qradar_get_custom_properties_command(client: Client, args: Dict) -> CommandResults: """ Retrieves a list of event regex properties from QRadar service. possible arguments: - field_names: A comma-separated list of names of an exact properties to search for. - range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3). - filter: Query filter to filter results returned by QRadar service. see https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html for more details. - fields: If used, will filter all fields except for the specified ones. Use this parameter to specify which fields you would like to get back in the response. Fields that are not explicitly named are excluded. Args: client (Client): QRadar client to perform the API call. args (Dict): Demisto args. Returns: CommandResults. 
""" limit = arg_to_number(args.get('limit', DEFAULT_LIMIT_VALUE)) if limit: range_ = f'items=0-{limit - 1}' else: range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' like_names = argToList(args.get('like_name')) field_names = argToList(args.get('field_name')) filter_ = args.get('filter', '') fields = args.get('fields') if not filter_: if field_names: filter_ += f'''name IN ({','.join(map(lambda name: f'"{str(name)}"', field_names))})''' if like_names: filter_ += ' or '.join(map(lambda like: f' name ILIKE "%{like}%"', like_names)) # if this call fails, raise an error and stop command execution response = client.custom_properties(range_, filter_, fields) outputs = sanitize_outputs(response) return CommandResults( readable_output=tableToMarkdown('Custom Properties', outputs, removeNull=True), outputs_prefix='QRadar.Properties', outputs_key_field='identifier', outputs=outputs, raw_response=response ) def perform_ips_command_request(client: Client, args: Dict[str, Any], is_destination_addresses: bool): """ Performs request to QRadar IPs endpoint. Args: client (Client): Client to perform the request to QRadar service. args (Dict[str, Any]): XSOAR arguments. is_destination_addresses (bool): Whether request is for destination addresses or source addresses. Returns: - Request response. """ range_: str = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}''' filter_: Optional[str] = args.get('filter') fields: Optional[str] = args.get('fields') address_type = 'local_destination' if is_destination_addresses else 'source' ips_arg_name: str = f'{address_type}_ip' ips: List[str] = argToList(args.get(ips_arg_name, [])) if ips and filter_: raise DemistoException(f'Both filter and {ips_arg_name} have been supplied. 
Please supply only one.') if ips: filter_ = ' OR '.join([f'{ips_arg_name}="{ip_}"' for ip_ in ips]) url_suffix = f'{address_type}_addresses' # if this call fails, raise an error and stop command execution response = client.get_addresses(url_suffix, filter_, fields, range_) return response def qradar_ips_source_get_command(client: Client, args: Dict[str, Any]) -> CommandResults: """ Get source IPS from QRadar service. Args: client (Client): Client to perform API calls to QRadar service. args (Dict[str, Any): XSOAR arguments. Returns: (CommandResults). """ response = perform_ips_command_request(client, args, is_destination_addresses=False) outputs = sanitize_outputs(response, SOURCE_IPS_OLD_NEW_MAP) return CommandResults( readable_output=tableToMarkdown('Source IPs', outputs), outputs_prefix='QRadar.SourceIP', outputs_key_field='ID', outputs=outputs, raw_response=response ) def qradar_ips_local_destination_get_command(client: Client, args: Dict[str, Any]) -> CommandResults: """ Get local destination IPS from QRadar service. Args: client (Client): Client to perform API calls to QRadar service. args (Dict[str, Any): XSOAR arguments. Returns: (CommandResults). """ response = perform_ips_command_request(client, args, is_destination_addresses=True) outputs = sanitize_outputs(response, LOCAL_DESTINATION_IPS_OLD_NEW_MAP) return CommandResults( readable_output=tableToMarkdown('Local Destination IPs', outputs), outputs_prefix='QRadar.LocalDestinationIP', outputs_key_field='ID', outputs=outputs, raw_response=response ) def qradar_reset_last_run_command() -> str: """ Puts the reset flag inside integration context. Returns: (str): 'fetch-incidents was reset successfully'. """ ctx = get_integration_context() ctx[RESET_KEY] = True set_to_integration_context_with_retries(ctx) return 'fetch-incidents was reset successfully.' def qradar_get_mapping_fields_command(client: Client) -> Dict: """ Returns Dict object containing the list of fields for an incident type. 
    This command should be used for debugging purposes.

    Args:
        client (Client): Client to perform API calls.

    Returns:
        (Dict): Contains all the mapping.
    """
    # Static schema describing the offense fields QRadar returns (names -> python type names).
    offense = {
        'username_count': 'int',
        'description': 'str',
        'rules': {
            'id': 'int',
            'type': 'str',
            'name': 'str'
        },
        'event_count': 'int',
        'flow_count': 'int',
        'assigned_to': 'NoneType',
        'security_category_count': 'int',
        'follow_up': 'bool',
        'source_address_ids': 'str',
        'source_count': 'int',
        'inactive': 'bool',
        'protected': 'bool',
        'closing_user': 'str',
        'destination_networks': 'str',
        'source_network': 'str',
        'category_count': 'int',
        'close_time': 'str',
        'remote_destination_count': 'int',
        'start_time': 'str',
        'magnitude': 'int',
        'last_updated_time': 'str',
        'credibility': 'int',
        'id': 'int',
        'categories': 'str',
        'severity': 'int',
        'policy_category_count': 'int',
        'closing_reason_id': 'str',
        'device_count': 'int',
        'offense_type': 'str',
        'relevance': 'int',
        'domain_id': 'int',
        'offense_source': 'str',
        'local_destination_address_ids': 'int',
        'local_destination_count': 'int',
        'status': 'str',
        'domain_name': 'str'
    }
    # Static schema of built-in AQL event columns used when mirroring events.
    events = {
        'events': {
            'qidname_qid': 'str',
            'logsourcename_logsourceid': 'str',
            'categoryname_highlevelcategory': 'str',
            'categoryname_category': 'str',
            'protocolname_protocolid': 'str',
            'sourceip': 'str',
            'sourceport': 'int',
            'destinationip': 'str',
            'destinationport': 'int',
            'qiddescription_qid': 'str',
            'username': 'NoneType',
            'rulename_creeventlist': 'str',
            'sourcegeographiclocation': 'str',
            'sourceMAC': 'str',
            'sourcev6': 'str',
            'destinationgeographiclocation': 'str',
            'destinationv6': 'str',
            'logsourcetypename_devicetype': 'str',
            'credibility': 'int',
            'severity': 'int',
            'magnitude': 'int',
            'eventcount': 'int',
            'eventDirection': 'str',
            'postNatDestinationIP': 'str',
            'postNatDestinationPort': 'int',
            'postNatSourceIP': 'str',
            'postNatSourcePort': 'int',
            'preNatDestinationPort': 'int',
            'preNatSourceIP': 'str',
            'preNatSourcePort': 'int',
            'utf8_payload': 'str',
            'starttime': 'str',
            'devicetime': 'int'
        }
    }
    # Static schema of asset fields (including asset-property display names).
    assets = {
        'assets': {
            'interfaces': {
                'mac_address': 'str',
                'ip_addresses': {
                    'type': 'str',
                    'value': 'str'
                },
                'id': 'int',
                'Unified Name': 'str',
                'Technical User': 'str',
                'Switch ID': 'str',
                'Business Contact': 'str',
                'CVSS Availability Requirement': 'str',
                'Compliance Notes': 'str',
                'Primary OS ID': 'str',
                'Compliance Plan': 'str',
                'Switch Port ID': 'str',
                'Weight': 'str',
                'Location': 'str',
                'CVSS Confidentiality Requirement': 'str',
                'Technical Contact': 'str',
                'Technical Owner': 'str',
                'CVSS Collateral Damage Potential': 'str',
                'Description': 'str',
                'Business Owner': 'str',
                'CVSS Integrity Requirement': 'str'
            },
            'id': 'int',
            'domain_id': 'int',
            'domain_name': 'str'
        }
    }
    # Custom event properties are fetched live from QRadar; only well-formed entries are kept.
    # if this call fails, raise an error and stop command execution
    custom_fields = {
        'events': {field.get('name'): field.get('property_type')
                   for field in client.custom_properties()
                   if 'name' in field and 'property_type' in field}
    }
    fields = {
        'Offense': offense,
        'Events: Builtin Fields': events,
        'Events: Custom Fields': custom_fields,
        'Assets': assets,
    }
    return fields


def update_events_mirror_message(mirror_options: Optional[Any], events_limit: int,
                                 failure_message: str, events_count: int,
                                 events_mirrored: int) -> str:
    """Return the offense's events' mirror error message.

    Args:
        mirror_options (str): The mirror options for the instance.
        events_limit (int): The events limit for the mirroring.
        failure_message (str): A failure message if there was a failure during fetching of events.
        events_count (int): The number of events in the offense.
        events_mirrored (int): The number of events mirrored in the offense.

    Returns:
        (str) An updated offense events mirror message.
""" mirroring_events_message = 'Unknown' print_debug_msg(f"mirror_options {mirror_options}\n events_limit {events_limit} \n" f"failure_message {failure_message}\n events_count {events_count}\n " f"events_mirrored {events_mirrored}") if mirror_options != MIRROR_OFFENSE_AND_EVENTS: mirroring_events_message = '' elif events_mirrored < min(events_count, events_limit) and failure_message: mirroring_events_message = failure_message elif events_mirrored == events_limit: mirroring_events_message = 'Mirroring events has reached events limit in this incident.' elif events_mirrored == events_count: mirroring_events_message = 'All available events in the offense were mirrored.' return mirroring_events_message def json_loads_inner(json_dumps_list: List[str]) -> list: """ Json load values of list. Args: json_dumps_list: A list with json dumps as nodes. Returns: json loaded list of the json dumps in the original list. """ python_object_list = [] for json_dump in json_dumps_list: try: python_object_list.append(json.loads(json_dump)) except Exception as e: demisto.error(f'Exception {e} when trying to json parse {json_dump}, as part of {json_dumps_list}') raise e return python_object_list def json_dumps_inner(listed_objects: list) -> List[str]: """ Json dump values of list. Args: listed_objects: A list with nodes to be json dumped. Returns: json dumped list of the json dumps in the original list. """ listed_json_dumps = [] for python_object in listed_objects: listed_json_dumps.append(json.dumps(python_object)) return listed_json_dumps def extract_context_data(context_data: dict, include_id: bool = False) -> dict: """Transform the context data from partially json encoded to fully decoded. Args: context_data: The context data. include_id: Whether to include id in the encoding of the data. Returns: The extracted context data. 
""" new_context_data = context_data.copy() new_context_data.pop(LAST_FETCH_KEY, None) if not new_context_data: new_context_data = {} new_context_data.update({ UPDATED_MIRRORED_OFFENSES_CTX_KEY: json_loads_inner(json.loads( context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, '[]'))), MIRRORED_OFFENSES_CTX_KEY: json_loads_inner(json.loads(context_data.get(MIRRORED_OFFENSES_CTX_KEY, '[]'))), RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY: json_loads_inner(json.loads( context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, '[]'))), 'samples': json_loads_inner(json.loads(context_data.get('samples', '[]'))), 'last_mirror_update': json.loads(context_data.get('last_mirror_update', '0')) }) if include_id and LAST_FETCH_KEY in context_data: new_context_data.update({LAST_FETCH_KEY: int(json.loads(context_data.get(LAST_FETCH_KEY, '0')))}) return new_context_data def encode_context_data(context_data: dict, include_id: bool = False) -> dict: """Transform the context data from a decoded python object form to a partially json encoded form. This is done in order to maintain compatibility with the set_to_integration_context_with_retries command. Args: context_data: The context data in its decoded python object form include_id: Whether to include id in the encoding of the data. Returns: The context data in its partially json encoded form. 
""" new_context_data = context_data.copy() new_context_data.pop('retry_compatible', None) new_context_data.pop(LAST_FETCH_KEY, None) new_context_data.pop(RESET_KEY, None) new_context_data.update({ UPDATED_MIRRORED_OFFENSES_CTX_KEY: json_dumps_inner(context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, [])), MIRRORED_OFFENSES_CTX_KEY: json_dumps_inner(context_data.get(MIRRORED_OFFENSES_CTX_KEY, [])), RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY: json_dumps_inner(context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, [])), 'samples': json_dumps_inner(context_data.get('samples', [])), 'last_mirror_update': str(context_data.get('last_mirror_update', 0)) }) if include_id and LAST_FETCH_KEY in context_data: new_context_data.update({LAST_FETCH_KEY: int(context_data.get(LAST_FETCH_KEY, 0))}) return new_context_data @safely_update_context_data def remove_offense_from_context_data(context_data: dict, version: Any, offense_id: str, offense_to_remove: str) -> Tuple[dict, Any, dict]: """Remove an offense from context data UPDATED_MIRRORED_OFFENSES_CTX_KEY and RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY. Args: context_data: The context data to update. version: The version of the context data to update. offense_id: The offense id to remove from RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY. offense_to_remove: The offense to remove from UPDATED_MIRRORED_OFFENSES_CTX_KEY. 
    Returns:
        (The new context_data, The context_data version the change was based on, The new context_data)
    """
    updated = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, [])
    resubmitted = context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, [])

    # Note: 'updated' holds offense dicts while 'resubmitted' holds offense id strings,
    # hence the two different membership tests.
    if offense_to_remove and offense_to_remove in updated:
        updated.remove(offense_to_remove)
    if offense_id in resubmitted:
        resubmitted.remove(offense_id)

    context_data[RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY] = resubmitted
    context_data[UPDATED_MIRRORED_OFFENSES_CTX_KEY] = updated

    # The decorator persists the first (encoded) value; the decoded dict is returned for the caller.
    return encode_context_data(context_data), version, context_data


def get_remote_data_command(client: Client, params: Dict[str, Any], args: Dict) -> GetRemoteDataResponse:
    """
    get-remote-data command: Returns an updated incident and entries.
    If offense's events were updated in the long running container, update the demisto incident.

    Args:
        client (Client): QRadar client to perform the API calls.
        params (Dict): Demisto params.
        args (Dict):
            id: Offense id to retrieve.
            lastUpdate: When was the last time we data was retrieved in Epoch.

    Returns:
        GetRemoteDataResponse.
    """
    print_debug_msg("Started GetRemoteData")
    remote_args = GetRemoteDataArgs(args)
    ip_enrich, asset_enrich = get_offense_enrichment(params.get('enrichment', 'IPs And Assets'))
    offense_id = remote_args.remote_incident_id
    # if this call fails, raise an error and stop command execution
    offense = client.offenses_list(offense_id=offense_id)
    offense_last_update = get_time_parameter(offense.get('last_persisted_time'))
    mirror_options = params.get('mirror_options')
    raw_context, context_version = get_integration_context_with_version()
    context_data = extract_context_data(raw_context.copy())
    events_limit = int(params.get('events_limit') or DEFAULT_EVENTS_LIMIT)
    print_mirror_events_stats(context_data, f"Starting Get Remote Data For "
                                            f"Offense {str(offense.get('id'))}")

    demisto.debug(f'Updating offense. Offense last update was {offense_last_update}')
    entries = []
    # When the offense is closed in QRadar and the instance is configured to close incidents,
    # emit a closing entry that carries the best closing reason we can find.
    if offense.get('status') == 'CLOSED' and argToBoolean(params.get('close_incident', False)):
        demisto.debug(f'Offense is closed: {offense}')
        try:
            if closing_reason := offense.get('closing_reason_id', ''):
                closing_reason = client.closing_reasons_list(closing_reason).get('text')
            offense_close_time = offense.get('close_time', '')
            closed_offense_notes = client.offense_notes_list(offense_id, f'items={DEFAULT_RANGE_VALUE}',
                                                             filter_=f'create_time >= {offense_close_time}')
            # In QRadar UI, when you close a reason, a note is added with the reason and more details. Try to get note
            # if exists, else fallback to closing reason only, as closing QRadar through an API call does not create a note.
            close_reason_with_note = next((note.get('note_text') for note in closed_offense_notes if
                                           note.get('note_text').startswith('This offense was closed with reason:')),
                                          closing_reason)
            if not close_reason_with_note:
                print_debug_msg(f'Could not find closing reason or closing note for offense with offense id {offense_id}')
                close_reason_with_note = 'Unknown closing reason from QRadar'
            else:
                close_reason_with_note = f'From QRadar: {close_reason_with_note}'
        except Exception as e:
            # Best-effort: a failure to resolve the reason must not block closing the incident.
            demisto.error(f'Failed to get closing reason with error: {e}')
            close_reason_with_note = 'Unknown closing reason from QRadar'

        entries.append({
            'Type': EntryType.NOTE,
            'Contents': {
                'dbotIncidentClose': True,
                'closeReason': close_reason_with_note
            },
            'ContentsFormat': EntryFormat.JSON
        })

    failure_message = 'Failed communicating with long running container.'
if mirror_options == MIRROR_OFFENSE_AND_EVENTS: offenses_waiting_for_update = context_data.get(MIRRORED_OFFENSES_CTX_KEY, []) max_retries = min(MAX_FETCH_EVENT_RETIRES * (len(offenses_waiting_for_update) + 3), 20) offense_to_remove = None is_waiting_to_be_updated = True evented_offense = None retries = 0 while ((not evented_offense) or is_waiting_to_be_updated) and retries < max_retries: if retries != 0: time.sleep(FAILURE_SLEEP) raw_context, context_version = get_integration_context_with_version() context_data = extract_context_data(raw_context.copy()) print_mirror_events_stats(context_data, f"Get Remote Data Loop for id {offense.get('id')}, retry {retries}") retries += 1 offenses_with_updated_events = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, []) offenses_waiting_for_update = context_data.get(MIRRORED_OFFENSES_CTX_KEY, []) evented_offense = [evented_offense for evented_offense in offenses_with_updated_events if str(evented_offense.get('id')) == str(offense.get("id"))] is_waiting_to_be_updated = any([True for waiting_offense in offenses_waiting_for_update if str(waiting_offense.get('id')) == str(offense.get("id"))]) if evented_offense: demisto.debug(f"Mirror Events: Offense {offense.get('id')} events were updated, updating incident.") if evented_offense[0].get('events'): offense['events'] = evented_offense[0].get('events') failure_message = evented_offense[0].get('mirroring_events_message', '') demisto.debug(f"Mirror Events: Offense {offense.get('id')} now has {len(offense.get('events'))} " f"fetched events. Mirror message: {failure_message}") offense_to_remove = evented_offense[0] elif is_waiting_to_be_updated: failure_message = 'In queue.' 
new_context_data = remove_offense_from_context_data(offense_id=offense_id, offense_to_remove=offense_to_remove, version=context_version, context_data=context_data) print_mirror_events_stats(new_context_data, f"Get Remote Data End for id {offense.get('id')}") enriched_offense = enrich_offenses_result(client, offense, ip_enrich, asset_enrich) final_offense_data = sanitize_outputs(enriched_offense)[0] events_message = update_events_mirror_message( mirror_options=mirror_options, events_limit=events_limit, failure_message=failure_message, events_count=int(final_offense_data.get('event_count', 0)), events_mirrored=len(final_offense_data.get('events', []))) final_offense_data['last_mirror_in_time'] = datetime.now().isoformat() final_offense_data['mirroring_events_message'] = events_message return GetRemoteDataResponse(final_offense_data, entries) @safely_update_context_data def add_modified_remote_offenses(context_data: dict, version: str, mirror_options: str, new_modified_records_ids: list, current_last_update: str, offenses: list) -> Tuple[dict, str, list]: """Add modified remote offenses to context_data and handle exhausted offenses. Args: context_data: The context data to update. version: The version of the context data to update. mirror_options: The mirror options for the integration. new_modified_records_ids: The new modified offenses ids. current_last_update: The current last mirror update. offenses: The offenses to update. 
    Returns:
        (The new context data, The context_data version the changes were based on, The new modified records ids)
    """
    new_context_data = context_data.copy()
    print_debug_msg(f'Saving New Highest ID: {context_data.get(LAST_FETCH_KEY, 0)}')
    new_context_data.update({'last_mirror_update': current_last_update})

    if mirror_options == MIRROR_OFFENSE_AND_EVENTS:
        print_mirror_events_stats(new_context_data, "Get Modified Remote Data - Before update")
        # Merge freshly modified offenses into the queue of offenses awaiting event fetching.
        mirrored_offenses = merge_lists(original_list=context_data.get(MIRRORED_OFFENSES_CTX_KEY, []),
                                        updated_list=offenses, key='id')
        new_context_data.update({MIRRORED_OFFENSES_CTX_KEY: mirrored_offenses})
        remaining_resubmitted_offenses = context_data.get(RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY, []).copy()
        updated_mirrored_offenses = context_data.get(UPDATED_MIRRORED_OFFENSES_CTX_KEY, [])
        clean_updates_mirrored_offenses = updated_mirrored_offenses.copy()
        if remaining_resubmitted_offenses:
            # Offenses resubmitted last cycle and still unclaimed are considered exhausted
            # (their incidents are not responding) and are dropped from the updated queue.
            for offense in updated_mirrored_offenses:
                if str(offense.get("id")) in remaining_resubmitted_offenses:
                    print_debug_msg(f"Removing Offense id {offense.get('id')} from processing Mirrored Events "
                                    f"since its incident is not responding. (It is probably closed)")
                    clean_updates_mirrored_offenses.remove(offense)

            new_context_data.update({UPDATED_MIRRORED_OFFENSES_CTX_KEY: clean_updates_mirrored_offenses})
            new_context_data.update({RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY: []})

        # Re-announce offenses with ready events as modified so XSOAR pulls them via get-remote-data.
        clean_updates_mirrored_offenses_ids = [str(offense.get('id')) for offense in clean_updates_mirrored_offenses]
        if clean_updates_mirrored_offenses_ids:
            new_modified_records_ids = list(set(new_modified_records_ids + clean_updates_mirrored_offenses_ids))
            new_context_data.update({RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY: clean_updates_mirrored_offenses_ids})

        print_mirror_events_stats(new_context_data, "Get Modified Remote Data - After update")

    return encode_context_data(new_context_data, include_id=False), version, new_modified_records_ids


def get_modified_remote_data_command(client: Client, params: Dict[str, str],
                                     args: Dict[str, str]) -> GetModifiedRemoteDataResponse:
    """
    Performs API calls to QRadar service, querying for offenses that were updated in QRadar later than
    the last update time given in the argument 'lastUpdate'.

    Args:
        client (Client): QRadar client to perform the API calls.
        params (Dict): Demisto params.
        args (Dict): Demisto arguments.

    Returns:
        (GetModifiedRemoteDataResponse): IDs of the offenses that have been modified in QRadar.
""" raw_ctx, ctx_version = get_integration_context_with_version() ctx = extract_context_data(raw_ctx, include_id=True) remote_args = GetModifiedRemoteDataArgs(args) highest_fetched_id = ctx.get(LAST_FETCH_KEY, 0) limit: int = int(params.get('mirror_limit', MAXIMUM_MIRROR_LIMIT)) range_ = f'items=0-{limit - 1}' last_update_time = ctx.get('last_mirror_update', 0) if not last_update_time: last_update_time = remote_args.last_update last_update = get_time_parameter(last_update_time, epoch_format=True) # if this call fails, raise an error and stop command execution offenses = client.offenses_list(range_=range_, filter_=f'id <= {highest_fetched_id} AND last_persisted_time > {last_update}', sort='+last_persisted_time', fields='id,start_time,event_count,last_persisted_time') new_modified_records_ids = [str(offense.get('id')) for offense in offenses if 'id' in offense] current_last_update = last_update if not offenses else offenses[-1].get('last_persisted_time') new_modified_records_ids = add_modified_remote_offenses(context_data=ctx, version=ctx_version, mirror_options=params.get('mirror_options'), new_modified_records_ids=new_modified_records_ids, current_last_update=current_last_update, offenses=offenses) return GetModifiedRemoteDataResponse(new_modified_records_ids) def clear_integration_ctx(ctx: dict) -> dict: """Return a cleared context_data dict so set_integration_context could be called on it. Calling set_integration_context with the output of this function ensures the next call to set_to_integration_context_with_retries will not fail. 
    Args:
        ctx: The context_data to simplify

    Returns:
        The cleared context_data
    """
    # The stored value may be a plain int string or a JSON-encoded one; try both before
    # giving up and resetting to 0.
    fetch_id_ctx: str = ctx.get(LAST_FETCH_KEY) or '0'
    try:
        fetch_id = int(fetch_id_ctx)
    except ValueError:
        try:
            fetch_id = int(json.loads(fetch_id_ctx))
        except ValueError:
            # NOTE(review): "retrive" typo in this debug text — cosmetic only, left untouched here.
            print_debug_msg(f"Could not retrive LAST_FETCH_KEY from {fetch_id_ctx} Setting to 0")
            fetch_id = 0

    # Same two-step parse for the mirror watermark.
    last_update_ctx: str = ctx.get('last_mirror_update') or '0'
    try:
        last_update = str(int(last_update_ctx))
    except ValueError:
        try:
            last_update = str(int(json.loads(last_update_ctx)))
        except ValueError:
            print_debug_msg(f"Could not retrive last_mirror_update from {last_update_ctx} Setting to '0'")
            last_update = '0'

    return {LAST_FETCH_KEY: json.dumps(fetch_id),
            'last_mirror_update': json.dumps(last_update),
            UPDATED_MIRRORED_OFFENSES_CTX_KEY: '[]',
            MIRRORED_OFFENSES_CTX_KEY: '[]',
            RESUBMITTED_MIRRORED_OFFENSES_CTX_KEY: '[]',
            'samples': '[]'}


def change_ctx_to_be_compatible_with_retry() -> None:
    """
    In order to move QRadar from using set_integration_context to set_to_integration_context_with_retries, the fields
    need to change to JSON strings.
    Change is required due to race condition occurring between get-modified-remote-data to long-running-execution.
    Because some customers already have instances running where fields are not JSON fields, this function is needed
    to make them be compatible with new changes.

    Returns:
        (None): Modifies context to be compatible.
    """
    ctx = get_integration_context()
    new_ctx = ctx.copy()
    # Probe: if the stored context decodes cleanly it is already retry-compatible.
    try:
        extracted_ctx = extract_context_data(ctx)
        print_mirror_events_stats(extracted_ctx, "Checking ctx")
        print_debug_msg("ctx was found to be compatible with retries")
        extract_works = True
    except Exception as e:
        print_debug_msg(f"extracting ctx {ctx} failed, trying to make it retry compatible. Error was: {str(e)}")
        extract_works = False

    if not extract_works:
        cleared_ctx = clear_integration_ctx(new_ctx)
        print_debug_msg(f"Change ctx context data was cleared and changing to {cleared_ctx}")
        set_integration_context(cleared_ctx)
        print_debug_msg(f"Change ctx context data was cleared and changed to {cleared_ctx}")


''' MAIN FUNCTION '''


def main() -> None:
    """Parse params, instantiate the client, and dispatch the invoked command."""
    params = demisto.params()
    command = demisto.command()
    args = demisto.args()

    # handle allowed advanced parameters: a comma-separated "name=value" list that overrides
    # whitelisted module-level globals (string or int, per the two allow-lists).
    adv_params = params.get('adv_params')
    if adv_params:
        try:
            globals_ = globals()
            for adv_p in adv_params.split(','):
                adv_p_kv = [item.strip() for item in adv_p.split('=')]
                if len(adv_p_kv) != 2:
                    raise DemistoException(
                        f'Failed to parse advanced parameter: {adv_p} - please make sure you entered it correctly.')
                adv_param_name = adv_p_kv[0]
                if adv_param_name in ADVANCED_PARAMETERS_STRING_NAMES:
                    globals_[adv_p_kv[0]] = adv_p_kv[1]
                elif adv_param_name in ADVANCED_PARAMETER_INT_NAMES:
                    globals_[adv_p_kv[0]] = int(adv_p_kv[1])
                else:
                    raise DemistoException(
                        f'The parameter: {adv_p_kv[0]} is not a valid advanced parameter. Please remove it')
        except DemistoException as e:
            raise DemistoException(f'Failed to parse advanced params. Error: {e.message}')
        except Exception as e:
            raise DemistoException(f'Failed to parse advanced params. Error: {e}')

    server = params.get('server')
    verify_certificate = not params.get('insecure', False)
    proxy = params.get('proxy', False)
    api_version = params.get('api_version')
    if float(api_version) < MINIMUM_API_VERSION:
        raise DemistoException(f'API version cannot be lower than {MINIMUM_API_VERSION}')
    credentials = params.get('credentials')

    try:
        client = Client(
            server=server,
            verify=verify_certificate,
            proxy=proxy,
            api_version=api_version,
            credentials=credentials)
        # All command names with or are for supporting QRadar v2 command names for backward compatibility
        if command == 'test-module':
            return_results(test_module_command(client, params))

        elif command == 'fetch-incidents':
            demisto.incidents(fetch_incidents_command())

        elif command == 'long-running-execution':
            # Context must be retry-compatible before the long-running loop starts mutating it.
            change_ctx_to_be_compatible_with_retry()
            support_multithreading()
            long_running_execution_command(client, params)

        elif command == 'qradar-offenses-list' or command == 'qradar-offenses' or command == 'qradar-offense-by-id':
            return_results(qradar_offenses_list_command(client, args))

        elif command == 'qradar-offense-update' or command == 'qradar-update-offense':
            return_results(qradar_offense_update_command(client, args))

        elif command == 'qradar-closing-reasons' or command == 'qradar-get-closing-reasons':
            return_results(qradar_closing_reasons_list_command(client, args))

        elif command == 'qradar-offense-notes-list' or command == 'qradar-get-note':
            return_results(qradar_offense_notes_list_command(client, args))

        elif command == 'qradar-offense-note-create' or command == 'qradar-create-note':
            return_results(qradar_offense_notes_create_command(client, args))

        elif command == 'qradar-rules-list':
            return_results(qradar_rules_list_command(client, args))

        elif command == 'qradar-rule-groups-list':
            return_results(qradar_rule_groups_list_command(client, args))

        elif command == 'qradar-assets-list' or command == 'qradar-get-assets' or command == 'qradar-get-asset-by-id':
            return_results(qradar_assets_list_command(client, args))

        elif command == 'qradar-saved-searches-list':
            return_results(qradar_saved_searches_list_command(client, args))

        elif command == 'qradar-searches-list':
            return_results(qradar_searches_list_command(client, args))

        elif command == 'qradar-search-create' or command == 'qradar-searches':
            return_results(qradar_search_create_command(client, args))

        elif command == 'qradar-search-status-get' or command == 'qradar-get-search':
            return_results(qradar_search_status_get_command(client, args))

        elif command == 'qradar-search-results-get' or command == 'qradar-get-search-results':
            return_results(qradar_search_results_get_command(client, args))

        elif command == 'qradar-reference-sets-list' or command == 'qradar-get-reference-by-name':
            return_results(qradar_reference_sets_list_command(client, args))

        elif command == 'qradar-reference-set-create' or command == 'qradar-create-reference-set':
            return_results(qradar_reference_set_create_command(client, args))

        elif command == 'qradar-reference-set-delete' or command == 'qradar-delete-reference-set':
            return_results(qradar_reference_set_delete_command(client, args))

        elif command == 'qradar-reference-set-value-upsert' or command == 'qradar-create-reference-set-value' or \
                command == 'qradar-update-reference-set-value':
            return_results(qradar_reference_set_value_upsert_command(client, args))

        elif command == 'qradar-reference-set-value-delete' or command == 'qradar-delete-reference-set-value':
            return_results(qradar_reference_set_value_delete_command(client, args))

        elif command == 'qradar-domains-list' or command == 'qradar-get-domains' or \
                command == 'qradar-get-domain-by-id':
            return_results(qradar_domains_list_command(client, args))

        elif command == 'qradar-indicators-upload' or command == 'qradar-upload-indicators':
            return_results(qradar_indicators_upload_command(client, args))

        elif command == 'qradar-geolocations-for-ip':
            return_results(qradar_geolocations_for_ip_command(client, args))

        elif command == 'qradar-log-sources-list':
return_results(qradar_log_sources_list_command(client, args)) elif command == 'qradar-get-custom-properties': return_results(qradar_get_custom_properties_command(client, args)) elif command == 'qradar-ips-source-get': return_results(qradar_ips_source_get_command(client, args)) elif command == 'qradar-ips-local-destination-get': return_results(qradar_ips_local_destination_get_command(client, args)) elif command == 'qradar-reset-last-run': return_results(qradar_reset_last_run_command()) elif command == 'get-mapping-fields': return_results(qradar_get_mapping_fields_command(client)) elif command == 'get-remote-data': change_ctx_to_be_compatible_with_retry() return_results(get_remote_data_command(client, params, args)) elif command == 'get-modified-remote-data': change_ctx_to_be_compatible_with_retry() return_results(get_modified_remote_data_command(client, params, args)) else: raise NotImplementedError(f'''Command '{command}' is not implemented.''') # Log exceptions and return errors except Exception as e: demisto.error(traceback.format_exc()) # print the traceback print_debug_msg(f"The integration context_data is {get_integration_context()}") return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') ''' ENTRY POINT ''' if __name__ in ('__main__', '__builtin__', 'builtins'): register_signal_handler_profiling_dump(profiling_dump_rows_limit=PROFILING_DUMP_ROWS_LIMIT) main()
"""Commodore 64 PRG disassembler: CBM BASIC V2 detokenizer and 6502 code tracer."""

import petscii
import bisect

from util import to_word, to_bytes

# CBM BASIC V2 keyword tokens $80-$CA, plus pi ($FF) as the final entry.
# fmt: off
basic_tokens = (
    "END", "FOR", "NEXT", "DATA", "INPUT#", "INPUT", "DIM", "READ", "LET",
    "GOTO", "RUN", "IF", "RESTORE", "GOSUB", "RETURN", "REM", "STOP", "ON",
    "WAIT", "LOAD", "SAVE", "VERIFY", "DEF", "POKE", "PRINT#", "PRINT",
    "CONT", "LIST", "CLR", "CMD", "SYS", "OPEN", "CLOSE", "GET", "NEW",
    "TAB(", "TO", "FN", "SPC(", "THEN", "NOT", "STEP", "+", "-", "*", "/",
    "↑", "AND", "OR", ">", "=", "<", "SGN", "INT", "ABS", "USR", "FRE",
    "POS", "SQR", "RND", "LOG", "EXP", "COS", "SIN", "TAN", "ATN", "PEEK",
    "LEN", "STR$", "VAL", "ASC", "CHR$", "LEFT$", "RIGHT$", "MID$", "π"
)
# fmt: on

# Text templates for each addressing mode; {} is replaced by the operand text.
# fmt: off
operand_formats = {
    "#": "#{}",       # immediate
    "A": "a",         # accumulator
    "I": "",          # implied
    "s": "",          # stack
    "i": "({})",      # jmp (indirect)
    "ix": "({},x)",   # (indirect,x)
    "iy": "({}),y",   # (indirect),y
    "z": "{}",        # zeropage
    "zx": "{},x",     # zeropage,x
    "zy": "{},y",     # zeropage,y
    "a": "{}",        # absolute
    "ax": "{},x",     # absolute,x
    "ay": "{},y",     # absolute,y
    "r": "{}",        # relative
}
# fmt: on


class Opcode:
    """Static description of one 6502 opcode byte."""

    def __init__(self, mnemonic, mode, length, cycles, variable, undocumented):
        self.mnemonic = mnemonic          # assembler mnemonic, lower case
        self.mode = mode                  # addressing-mode key into operand_formats
        self.length = length              # instruction size in bytes (1-3)
        self.cycles = cycles              # base cycle count
        self.variable = variable          # cycles vary (page cross / branch taken)
        self.undocumented = undocumented  # illegal/undocumented opcode


# All 256 opcodes, indexed by opcode byte.
# NOTE(review): a few entries look off versus published 6510 tables — e.g. $CE
# dec abs is listed as 4 cycles (real hardware: 6), and the $93/$9B/$9C sha/
# shs/shy addressing modes don't match common references — verify before
# relying on cycle counts; the tracer itself stops at undocumented opcodes.
# fmt: off
opcodes = (
    # $00
    Opcode("brk", "s", 2, 7, False, False), Opcode("ora", "ix", 2, 6, False, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("slo", "ix", 2, 8, False, True),
    Opcode("nop", "z", 2, 3, False, True), Opcode("ora", "z", 2, 3, False, False),
    Opcode("asl", "z", 2, 5, False, False), Opcode("slo", "z", 2, 5, False, True),
    Opcode("php", "s", 1, 3, False, False), Opcode("ora", "#", 2, 2, False, False),
    Opcode("asl", "A", 1, 2, False, False), Opcode("anc", "#", 2, 2, False, True),
    Opcode("nop", "a", 3, 4, False, True), Opcode("ora", "a", 3, 4, False, False),
    Opcode("asl", "a", 3, 6, False, False), Opcode("slo", "a", 3, 6, False, True),
    # $10
    Opcode("bpl", "r", 2, 2, True, False), Opcode("ora", "iy", 2, 5, True, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("slo", "iy", 2, 8, False, True),
    Opcode("nop", "zx", 2, 4, False, True), Opcode("ora", "zx", 2, 4, False, False),
    Opcode("asl", "zx", 2, 6, False, False), Opcode("slo", "zx", 2, 6, False, True),
    Opcode("clc", "I", 1, 2, False, False), Opcode("ora", "ay", 3, 4, True, False),
    Opcode("nop", "I", 1, 2, False, True), Opcode("slo", "ay", 3, 7, False, True),
    Opcode("nop", "ax", 3, 4, True, True), Opcode("ora", "ax", 3, 4, True, False),
    Opcode("asl", "ax", 3, 7, False, False), Opcode("slo", "ax", 3, 7, False, True),
    # $20
    Opcode("jsr", "a", 3, 6, False, False), Opcode("and", "ix", 2, 6, False, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("rla", "ix", 2, 8, False, True),
    Opcode("bit", "z", 2, 3, False, False), Opcode("and", "z", 2, 3, False, False),
    Opcode("rol", "z", 2, 5, False, False), Opcode("rla", "z", 2, 5, False, True),
    Opcode("plp", "s", 1, 4, False, False), Opcode("and", "#", 2, 2, False, False),
    Opcode("rol", "A", 1, 2, False, False), Opcode("anc", "#", 2, 2, False, True),
    Opcode("bit", "a", 3, 4, False, False), Opcode("and", "a", 3, 4, False, False),
    Opcode("rol", "a", 3, 6, False, False), Opcode("rla", "a", 3, 6, False, True),
    # $30
    Opcode("bmi", "r", 2, 2, True, False), Opcode("and", "iy", 2, 5, True, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("rla", "iy", 2, 8, False, True),
    Opcode("nop", "zx", 2, 4, False, True), Opcode("and", "zx", 2, 4, False, False),
    Opcode("rol", "zx", 2, 6, False, False), Opcode("rla", "zx", 2, 6, False, True),
    Opcode("sec", "I", 1, 2, False, False), Opcode("and", "ay", 3, 4, True, False),
    Opcode("nop", "I", 1, 2, False, True), Opcode("rla", "ay", 3, 7, False, True),
    Opcode("nop", "ax", 3, 4, True, True), Opcode("and", "ax", 3, 4, True, False),
    Opcode("rol", "ax", 3, 7, False, False), Opcode("rla", "ax", 3, 7, False, True),
    # $40
    Opcode("rti", "s", 1, 6, False, False), Opcode("eor", "ix", 2, 6, False, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("sre", "ix", 2, 8, False, True),
    Opcode("nop", "z", 2, 3, False, True), Opcode("eor", "z", 2, 3, False, False),
    Opcode("lsr", "z", 2, 5, False, False), Opcode("sre", "z", 2, 5, False, True),
    Opcode("pha", "s", 1, 3, False, False), Opcode("eor", "#", 2, 2, False, False),
    Opcode("lsr", "A", 1, 2, False, False), Opcode("asr", "#", 2, 2, False, True),
    Opcode("jmp", "a", 3, 3, False, False), Opcode("eor", "a", 3, 4, False, False),
    Opcode("lsr", "a", 3, 6, False, False), Opcode("sre", "a", 3, 6, False, True),
    # $50
    Opcode("bvc", "r", 2, 2, True, False), Opcode("eor", "iy", 2, 5, True, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("sre", "iy", 2, 8, False, True),
    Opcode("nop", "zx", 2, 4, False, True), Opcode("eor", "zx", 2, 4, False, False),
    Opcode("lsr", "zx", 2, 6, False, False), Opcode("sre", "zx", 2, 6, False, True),
    Opcode("cli", "I", 1, 2, False, False), Opcode("eor", "ay", 3, 4, True, False),
    Opcode("nop", "I", 1, 2, False, True), Opcode("sre", "ay", 3, 7, False, True),
    Opcode("nop", "ax", 3, 4, True, True), Opcode("eor", "ax", 3, 4, True, False),
    Opcode("lsr", "ax", 3, 7, False, False), Opcode("sre", "ax", 3, 7, False, True),
    # $60
    Opcode("rts", "s", 1, 6, False, False), Opcode("adc", "ix", 2, 6, False, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("rra", "ix", 2, 8, False, True),
    Opcode("nop", "z", 2, 3, False, True), Opcode("adc", "z", 2, 3, False, False),
    Opcode("ror", "z", 2, 5, False, False), Opcode("rra", "z", 2, 5, False, True),
    Opcode("pla", "s", 1, 4, False, False), Opcode("adc", "#", 2, 2, False, False),
    Opcode("ror", "A", 1, 2, False, False), Opcode("arr", "#", 2, 2, False, True),
    Opcode("jmp", "i", 3, 5, False, False), Opcode("adc", "a", 3, 4, False, False),
    Opcode("ror", "a", 3, 6, False, False), Opcode("rra", "a", 3, 6, False, True),
    # $70
    Opcode("bvs", "r", 2, 2, True, False), Opcode("adc", "iy", 2, 5, True, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("rra", "iy", 2, 8, False, True),
    Opcode("nop", "zx", 2, 4, False, True), Opcode("adc", "zx", 2, 4, False, False),
    Opcode("ror", "zx", 2, 6, False, False), Opcode("rra", "zx", 2, 6, False, True),
    Opcode("sei", "I", 1, 2, False, False), Opcode("adc", "ay", 3, 4, True, False),
    Opcode("nop", "I", 1, 2, False, True), Opcode("rra", "ay", 3, 7, False, True),
    Opcode("nop", "ax", 3, 4, True, True), Opcode("adc", "ax", 3, 4, True, False),
    Opcode("ror", "ax", 3, 7, False, False), Opcode("rra", "ax", 3, 7, False, True),
    # $80
    Opcode("nop", "#", 2, 2, False, True), Opcode("sta", "ix", 2, 6, False, False),
    Opcode("nop", "#", 2, 2, False, True), Opcode("sax", "ix", 2, 6, False, True),
    Opcode("sty", "z", 2, 3, False, False), Opcode("sta", "z", 2, 3, False, False),
    Opcode("stx", "z", 2, 3, False, False), Opcode("sax", "z", 2, 3, False, True),
    Opcode("dey", "I", 1, 2, False, False), Opcode("nop", "#", 2, 2, False, True),
    Opcode("txa", "I", 1, 2, False, False), Opcode("ane", "#", 2, 2, False, True),
    Opcode("sty", "a", 3, 4, False, False), Opcode("sta", "a", 3, 4, False, False),
    Opcode("stx", "a", 3, 4, False, False), Opcode("sax", "a", 3, 4, False, True),
    # $90
    Opcode("bcc", "r", 2, 2, True, False), Opcode("sta", "iy", 2, 6, False, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("sha", "ax", 3, 5, False, True),
    Opcode("sty", "zx", 2, 4, False, False), Opcode("sta", "zx", 2, 4, False, False),
    Opcode("stx", "zy", 2, 4, False, False), Opcode("sax", "zy", 2, 4, False, True),
    Opcode("tya", "I", 1, 2, False, False), Opcode("sta", "ay", 3, 5, False, False),
    Opcode("txs", "I", 1, 2, False, False), Opcode("shs", "ax", 3, 5, False, True),
    Opcode("shy", "ay", 3, 5, False, True), Opcode("sta", "ax", 3, 5, False, False),
    Opcode("shx", "ay", 3, 5, False, True), Opcode("sha", "ay", 3, 5, False, True),
    # $A0
    Opcode("ldy", "#", 2, 2, False, False), Opcode("lda", "ix", 2, 6, False, False),
    Opcode("ldx", "#", 2, 2, False, False), Opcode("lax", "ix", 2, 6, False, True),
    Opcode("ldy", "z", 2, 3, False, False), Opcode("lda", "z", 2, 3, False, False),
    Opcode("ldx", "z", 2, 3, False, False), Opcode("lax", "z", 2, 3, False, True),
    Opcode("tay", "I", 1, 2, False, False), Opcode("lda", "#", 2, 2, False, False),
    Opcode("tax", "I", 1, 2, False, False), Opcode("lxa", "#", 2, 2, False, True),
    Opcode("ldy", "a", 3, 4, False, False), Opcode("lda", "a", 3, 4, False, False),
    Opcode("ldx", "a", 3, 4, False, False), Opcode("lax", "a", 3, 4, False, True),
    # $B0
    Opcode("bcs", "r", 2, 2, True, False), Opcode("lda", "iy", 2, 5, True, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("lax", "iy", 2, 5, True, True),
    Opcode("ldy", "zx", 2, 4, False, False), Opcode("lda", "zx", 2, 4, False, False),
    Opcode("ldx", "zy", 2, 4, False, False), Opcode("lax", "zy", 2, 4, False, True),
    Opcode("clv", "I", 1, 2, False, False), Opcode("lda", "ay", 3, 4, True, False),
    Opcode("tsx", "I", 1, 2, False, False), Opcode("lae", "ay", 3, 4, True, True),
    Opcode("ldy", "ax", 3, 4, False, False), Opcode("lda", "ax", 3, 4, True, False),
    Opcode("ldx", "ay", 3, 4, True, False), Opcode("lax", "ay", 3, 4, True, True),
    # $C0
    Opcode("cpy", "#", 2, 2, False, False), Opcode("cmp", "ix", 2, 6, False, False),
    Opcode("nop", "#", 2, 2, False, True), Opcode("dcp", "ix", 2, 8, False, True),
    Opcode("cpy", "z", 2, 3, False, False), Opcode("cmp", "z", 2, 3, False, False),
    Opcode("dec", "z", 2, 5, False, False), Opcode("dcp", "z", 2, 5, False, True),
    Opcode("iny", "I", 1, 2, False, False), Opcode("cmp", "#", 2, 2, False, False),
    Opcode("dex", "I", 1, 2, False, False), Opcode("sbx", "#", 2, 2, False, True),
    Opcode("cpy", "a", 3, 4, False, False), Opcode("cmp", "a", 3, 4, False, False),
    Opcode("dec", "a", 3, 4, False, False), Opcode("dcp", "a", 3, 6, False, True),
    # $D0
    Opcode("bne", "r", 2, 2, True, False), Opcode("cmp", "iy", 2, 5, True, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("dcp", "iy", 2, 8, False, True),
    Opcode("nop", "zx", 2, 4, False, True), Opcode("cmp", "zx", 2, 4, False, False),
    Opcode("dec", "zx", 2, 6, False, False), Opcode("dcp", "zx", 2, 6, False, True),
    Opcode("cld", "I", 1, 2, False, False), Opcode("cmp", "ay", 3, 4, True, False),
    Opcode("nop", "I", 1, 2, False, True), Opcode("dcp", "ay", 3, 7, False, True),
    Opcode("nop", "ax", 3, 4, True, True), Opcode("cmp", "ax", 3, 4, True, False),
    Opcode("dec", "ax", 3, 7, False, False), Opcode("dcp", "ax", 3, 7, False, True),
    # $E0
    Opcode("cpx", "#", 2, 2, False, False), Opcode("sbc", "ix", 2, 6, False, False),
    Opcode("nop", "#", 2, 2, False, True), Opcode("isb", "ix", 2, 8, False, True),
    Opcode("cpx", "z", 2, 3, False, False), Opcode("sbc", "z", 2, 3, False, False),
    Opcode("inc", "z", 2, 5, False, False), Opcode("isb", "z", 2, 5, False, True),
    Opcode("inx", "I", 1, 2, False, False), Opcode("sbc", "#", 2, 2, False, False),
    Opcode("nop", "I", 1, 2, False, False), Opcode("sbc", "#", 2, 2, False, True),
    Opcode("cpx", "a", 3, 4, False, False), Opcode("sbc", "a", 3, 4, False, False),
    Opcode("inc", "a", 3, 6, False, False), Opcode("isb", "a", 3, 6, False, True),
    # $F0
    Opcode("beq", "r", 2, 2, True, False), Opcode("sbc", "iy", 2, 5, True, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("isb", "iy", 2, 8, False, True),
    Opcode("nop", "zx", 2, 4, False, True), Opcode("sbc", "zx", 2, 4, False, False),
    Opcode("inc", "zx", 2, 6, False, False), Opcode("isb", "zx", 2, 6, False, True),
    Opcode("sed", "I", 1, 2, False, False), Opcode("sbc", "ay", 3, 4, True, False),
    Opcode("nop", "I", 1, 2, False, True), Opcode("isb", "ay", 3, 7, False, True),
    Opcode("nop", "ax", 3, 4, True, True), Opcode("sbc", "ax", 3, 4, True, False),
    Opcode("inc", "ax", 3, 7, False, False), Opcode("isb", "ax", 3, 7, False, True),
)
# fmt: on


class AsmInstr:
    """One decoded 6502 instruction at a fixed address."""

    def __init__(self, addr, data):
        self.addr = addr
        self.bytes = bytes(data)
        self.opcode = opcodes[data[0]]
        if len(data) == 3:
            # 16-bit little-endian operand
            self.operand = to_word(data[1:])
        elif len(data) == 2:
            self.operand = self.bytes[1]
            if self.opcode.mode == "r":
                # sign-extend the relative offset
                if self.operand & 0x80:
                    self.operand = (self.operand & 0x7F) - 0x80
                # BUG FIX: relative branches are counted from the address of the
                # *next* instruction (addr + 2), not from the branch itself.
                self.operand += self.addr + 2
        else:
            # one-byte instruction: no operand
            self.operand = None

    def format(self, symbols=None, lower=True, addr=True, bytes=True):
        """Render the instruction as text.

        symbols: optional {address: name} map substituted for operand values.
        lower:   emit lower-case mnemonics and hex digits when True.
        addr:    prefix the instruction address when True.
        bytes:   prefix the raw instruction bytes when True.
        """
        if self.operand is not None:
            if symbols and self.operand in symbols:
                sym = symbols[self.operand]
            else:
                if self.opcode.length == 2:
                    if self.opcode.mode == "r":
                        # branch targets are full addresses: 4 hex digits
                        sym = f"${self.operand:04x}"
                    else:
                        sym = f"${self.operand:02x}"
                elif self.opcode.length == 3:
                    sym = f"${self.operand:04x}"
                if not lower:
                    sym = sym.upper()
        else:
            sym = ""
        nem = self.opcode.mnemonic
        fmt = operand_formats[self.opcode.mode]
        if not lower:
            nem = nem.upper()
            fmt = fmt.upper()
        text = ""
        if addr:
            text += f"{self.addr:04x} "
        if bytes:
            # BUG FIX: reusing double quotes inside the f-string expression is a
            # SyntaxError before Python 3.12; use single quotes.
            text += f"{self.bytes.hex(' '): <9} "
        text += f"{nem} {fmt.format(sym)}"
        return text

    def __str__(self):
        return self.format()

    def __repr__(self):
        classname = self.__class__.__name__
        return f"{classname}(0x{self.addr:04x}, {self.bytes})"

    def __lt__(self, other):
        # blocks sort by address (see State.insert_block)
        return self.addr < other.addr


class BasicLine:
    """One tokenized CBM BASIC line: link word, line number, token bytes."""

    def __init__(self, addr, bytes):
        self.addr = addr
        self.bytes = bytes
        self.link = to_word(bytes)        # address of the next BASIC line
        self.lineno = to_word(bytes[2:])  # BASIC line number

    def format(self, lower=False):
        """Detokenize the line back to readable BASIC text."""
        line = [f"{self.lineno} "]
        for byte in self.bytes[4:-1]:
            if byte >= 0x80 and byte <= 0xCA:
                token = basic_tokens[byte - 0x80]
                line.append(token.lower() if lower else token)
            elif byte == 0xFF:
                line.append(basic_tokens[-1])  # pi
            else:
                line.append(petscii.to_unicode(bytes([byte]), lower))
        return "".join(line)

    def syscalls(self):
        """Return the decimal operands of every SYS (token $9E) on this line."""
        addrs = []
        tok = self.bytes[4:]
        try:
            while True:
                beg = tok.index(0x9E) + 1
                while tok[beg] == 0x20:  # skip spaces after SYS
                    beg += 1
                end = beg
                while tok[end] >= 0x30 and tok[end] <= 0x39:  # decimal digits
                    end += 1
                addrs.append(int(tok[beg:end]))
                tok = tok[end:]
        except (ValueError, IndexError):
            # ValueError: no further SYS token / no digits after it;
            # IndexError: scanned past the end of the line. Both end the loop.
            pass
        return addrs

    def __str__(self):
        return self.format()

    def __repr__(self):
        classname = self.__class__.__name__
        return f"{classname}(0x{self.addr:04x}, {self.bytes})"

    def __lt__(self, other):
        return self.addr < other.addr


class Data:
    """An uninterpreted run of raw bytes at a fixed address."""

    def __init__(self, addr, bytes):
        self.addr = addr
        self.bytes = bytes

    def format(self):
        """Hex-dump the bytes, 16 per line, each line prefixed by its address."""
        lines = []
        # BUG FIX: the original called len(0, self.data) (len takes a single
        # argument) and read the nonexistent attribute self.data; the method
        # always raised. Iterate over self.bytes in 16-byte rows instead.
        for i in range(0, len(self.bytes), 16):
            lines.append(f"{self.addr + i} {self.bytes[i:i + 16].hex(' ')}")
        return "\n".join(lines)

    def __str__(self):
        return self.format()

    def __repr__(self):
        classname = self.__class__.__name__
        return f"{classname}(0x{self.addr:04x}, {self.bytes})"

    def __lt__(self, other):
        return self.addr < other.addr


# mnemonics whose target should be queued for tracing (conditional branches
# and jsr: execution can continue both at the target and after the instruction)
branchops = {"bvs", "bcs", "beq", "bmi", "bcc", "bne", "bpl", "bvc", "jsr"}


class State:
    """A 64 KiB memory image plus the blocks recovered from it."""

    def __init__(self):
        self.mem = bytearray(64 * 1024)  # full address space
        self.claimed = set()             # addresses already disassembled as code
        self.blocks = []                 # BasicLine/AsmInstr/Data, sorted by addr

    def load_prg(self, data):
        """Copy a PRG image (2-byte little-endian load address header) into memory."""
        load = to_word(data)
        data = data[2:]
        self.mem[load:load + len(data)] = data
        if load == 0x801:
            # standard BASIC start address: detokenize the program
            self.parse_basic(0x801)

    def parse_basic(self, addr):
        """Walk the BASIC line links from addr, inserting a BasicLine per line."""
        prev = None
        while True:
            it = iter(self.mem[addr:])
            try:
                line = []
                for _ in range(4):  # link word + line number
                    line.append(next(it))
                link = to_word(line)
                if link == 0:  # a null link terminates the program
                    break
                b = 0xFF
                while b != 0:  # line text is zero-terminated
                    b = next(it)
                    line.append(b)
                cur = BasicLine(addr, bytes(line))
                self.insert_block(cur)
                if prev is not None:
                    prev.next = cur
                # BUG FIX: the original never advanced prev, so the .next
                # chain between lines was never built.
                prev = cur
                addr = link
            except StopIteration:
                break

    def trace_asm(self, start):
        """Trace reachable code from start, claiming bytes and inserting AsmInstr.

        Tracing stops at rts/brk, undocumented opcodes, or already-claimed
        bytes; conditional-branch and jsr targets are queued and traced too.
        """
        heads = set()  # pending branch/jsr targets
        pc = start     # renamed from `next`, which shadowed the builtin
        while True:
            op = opcodes[self.mem[pc]]
            instr = AsmInstr(pc, self.mem[pc:pc + op.length])
            pc += op.length
            instaddrs = set(range(instr.addr, pc))
            if (
                op.mnemonic in ("rts", "brk")
                or op.undocumented
                or self.claimed & instaddrs
            ):
                # dead end: resume at a queued target, if any remain
                if heads:
                    pc = heads.pop()
                else:
                    break
            else:
                self.claimed.update(instaddrs)
                self.insert_block(instr)
                if op.mnemonic == "jmp":
                    if op.mode == "a":
                        pc = instr.operand
                    elif op.mode == "i":
                        # BUG FIX: read the two-byte vector; the original passed
                        # a single int (self.mem[x]) to to_word(), which expects
                        # a byte sequence everywhere else in this module.
                        pc = to_word(self.mem[instr.operand:instr.operand + 2])
                elif op.mnemonic in branchops:
                    heads.add(instr.operand)

    def insert_block(self, block):
        """Insert keeping self.blocks sorted by address (via the blocks' __lt__)."""
        bisect.insort_right(self.blocks, block)

    def __str__(self):
        return "\n".join(str(b) for b in self.blocks)
"""Commodore 64 PRG disassembler: CBM BASIC V2 detokenizer and 6502 code tracer.

NOTE(review): this module appears twice in this file; consider de-duplicating.
"""

import petscii
import bisect

from util import to_word, to_bytes

# CBM BASIC V2 keyword tokens $80-$CA, plus pi ($FF) as the final entry.
# fmt: off
basic_tokens = (
    "END", "FOR", "NEXT", "DATA", "INPUT#", "INPUT", "DIM", "READ", "LET",
    "GOTO", "RUN", "IF", "RESTORE", "GOSUB", "RETURN", "REM", "STOP", "ON",
    "WAIT", "LOAD", "SAVE", "VERIFY", "DEF", "POKE", "PRINT#", "PRINT",
    "CONT", "LIST", "CLR", "CMD", "SYS", "OPEN", "CLOSE", "GET", "NEW",
    "TAB(", "TO", "FN", "SPC(", "THEN", "NOT", "STEP", "+", "-", "*", "/",
    "↑", "AND", "OR", ">", "=", "<", "SGN", "INT", "ABS", "USR", "FRE",
    "POS", "SQR", "RND", "LOG", "EXP", "COS", "SIN", "TAN", "ATN", "PEEK",
    "LEN", "STR$", "VAL", "ASC", "CHR$", "LEFT$", "RIGHT$", "MID$", "π"
)
# fmt: on

# Text templates for each addressing mode; {} is replaced by the operand text.
# fmt: off
operand_formats = {
    "#": "#{}",       # immediate
    "A": "a",         # accumulator
    "I": "",          # implied
    "s": "",          # stack
    "i": "({})",      # jmp (indirect)
    "ix": "({},x)",   # (indirect,x)
    "iy": "({}),y",   # (indirect),y
    "z": "{}",        # zeropage
    "zx": "{},x",     # zeropage,x
    "zy": "{},y",     # zeropage,y
    "a": "{}",        # absolute
    "ax": "{},x",     # absolute,x
    "ay": "{},y",     # absolute,y
    "r": "{}",        # relative
}
# fmt: on


class Opcode:
    """Static description of one 6502 opcode byte."""

    def __init__(self, mnemonic, mode, length, cycles, variable, undocumented):
        self.mnemonic = mnemonic          # assembler mnemonic, lower case
        self.mode = mode                  # addressing-mode key into operand_formats
        self.length = length              # instruction size in bytes (1-3)
        self.cycles = cycles              # base cycle count
        self.variable = variable          # cycles vary (page cross / branch taken)
        self.undocumented = undocumented  # illegal/undocumented opcode


# All 256 opcodes, indexed by opcode byte.
# NOTE(review): a few entries look off versus published 6510 tables — e.g. $CE
# dec abs is listed as 4 cycles (real hardware: 6), and the $93/$9B/$9C sha/
# shs/shy addressing modes don't match common references — verify before
# relying on cycle counts; the tracer itself stops at undocumented opcodes.
# fmt: off
opcodes = (
    # $00
    Opcode("brk", "s", 2, 7, False, False), Opcode("ora", "ix", 2, 6, False, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("slo", "ix", 2, 8, False, True),
    Opcode("nop", "z", 2, 3, False, True), Opcode("ora", "z", 2, 3, False, False),
    Opcode("asl", "z", 2, 5, False, False), Opcode("slo", "z", 2, 5, False, True),
    Opcode("php", "s", 1, 3, False, False), Opcode("ora", "#", 2, 2, False, False),
    Opcode("asl", "A", 1, 2, False, False), Opcode("anc", "#", 2, 2, False, True),
    Opcode("nop", "a", 3, 4, False, True), Opcode("ora", "a", 3, 4, False, False),
    Opcode("asl", "a", 3, 6, False, False), Opcode("slo", "a", 3, 6, False, True),
    # $10
    Opcode("bpl", "r", 2, 2, True, False), Opcode("ora", "iy", 2, 5, True, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("slo", "iy", 2, 8, False, True),
    Opcode("nop", "zx", 2, 4, False, True), Opcode("ora", "zx", 2, 4, False, False),
    Opcode("asl", "zx", 2, 6, False, False), Opcode("slo", "zx", 2, 6, False, True),
    Opcode("clc", "I", 1, 2, False, False), Opcode("ora", "ay", 3, 4, True, False),
    Opcode("nop", "I", 1, 2, False, True), Opcode("slo", "ay", 3, 7, False, True),
    Opcode("nop", "ax", 3, 4, True, True), Opcode("ora", "ax", 3, 4, True, False),
    Opcode("asl", "ax", 3, 7, False, False), Opcode("slo", "ax", 3, 7, False, True),
    # $20
    Opcode("jsr", "a", 3, 6, False, False), Opcode("and", "ix", 2, 6, False, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("rla", "ix", 2, 8, False, True),
    Opcode("bit", "z", 2, 3, False, False), Opcode("and", "z", 2, 3, False, False),
    Opcode("rol", "z", 2, 5, False, False), Opcode("rla", "z", 2, 5, False, True),
    Opcode("plp", "s", 1, 4, False, False), Opcode("and", "#", 2, 2, False, False),
    Opcode("rol", "A", 1, 2, False, False), Opcode("anc", "#", 2, 2, False, True),
    Opcode("bit", "a", 3, 4, False, False), Opcode("and", "a", 3, 4, False, False),
    Opcode("rol", "a", 3, 6, False, False), Opcode("rla", "a", 3, 6, False, True),
    # $30
    Opcode("bmi", "r", 2, 2, True, False), Opcode("and", "iy", 2, 5, True, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("rla", "iy", 2, 8, False, True),
    Opcode("nop", "zx", 2, 4, False, True), Opcode("and", "zx", 2, 4, False, False),
    Opcode("rol", "zx", 2, 6, False, False), Opcode("rla", "zx", 2, 6, False, True),
    Opcode("sec", "I", 1, 2, False, False), Opcode("and", "ay", 3, 4, True, False),
    Opcode("nop", "I", 1, 2, False, True), Opcode("rla", "ay", 3, 7, False, True),
    Opcode("nop", "ax", 3, 4, True, True), Opcode("and", "ax", 3, 4, True, False),
    Opcode("rol", "ax", 3, 7, False, False), Opcode("rla", "ax", 3, 7, False, True),
    # $40
    Opcode("rti", "s", 1, 6, False, False), Opcode("eor", "ix", 2, 6, False, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("sre", "ix", 2, 8, False, True),
    Opcode("nop", "z", 2, 3, False, True), Opcode("eor", "z", 2, 3, False, False),
    Opcode("lsr", "z", 2, 5, False, False), Opcode("sre", "z", 2, 5, False, True),
    Opcode("pha", "s", 1, 3, False, False), Opcode("eor", "#", 2, 2, False, False),
    Opcode("lsr", "A", 1, 2, False, False), Opcode("asr", "#", 2, 2, False, True),
    Opcode("jmp", "a", 3, 3, False, False), Opcode("eor", "a", 3, 4, False, False),
    Opcode("lsr", "a", 3, 6, False, False), Opcode("sre", "a", 3, 6, False, True),
    # $50
    Opcode("bvc", "r", 2, 2, True, False), Opcode("eor", "iy", 2, 5, True, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("sre", "iy", 2, 8, False, True),
    Opcode("nop", "zx", 2, 4, False, True), Opcode("eor", "zx", 2, 4, False, False),
    Opcode("lsr", "zx", 2, 6, False, False), Opcode("sre", "zx", 2, 6, False, True),
    Opcode("cli", "I", 1, 2, False, False), Opcode("eor", "ay", 3, 4, True, False),
    Opcode("nop", "I", 1, 2, False, True), Opcode("sre", "ay", 3, 7, False, True),
    Opcode("nop", "ax", 3, 4, True, True), Opcode("eor", "ax", 3, 4, True, False),
    Opcode("lsr", "ax", 3, 7, False, False), Opcode("sre", "ax", 3, 7, False, True),
    # $60
    Opcode("rts", "s", 1, 6, False, False), Opcode("adc", "ix", 2, 6, False, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("rra", "ix", 2, 8, False, True),
    Opcode("nop", "z", 2, 3, False, True), Opcode("adc", "z", 2, 3, False, False),
    Opcode("ror", "z", 2, 5, False, False), Opcode("rra", "z", 2, 5, False, True),
    Opcode("pla", "s", 1, 4, False, False), Opcode("adc", "#", 2, 2, False, False),
    Opcode("ror", "A", 1, 2, False, False), Opcode("arr", "#", 2, 2, False, True),
    Opcode("jmp", "i", 3, 5, False, False), Opcode("adc", "a", 3, 4, False, False),
    Opcode("ror", "a", 3, 6, False, False), Opcode("rra", "a", 3, 6, False, True),
    # $70
    Opcode("bvs", "r", 2, 2, True, False), Opcode("adc", "iy", 2, 5, True, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("rra", "iy", 2, 8, False, True),
    Opcode("nop", "zx", 2, 4, False, True), Opcode("adc", "zx", 2, 4, False, False),
    Opcode("ror", "zx", 2, 6, False, False), Opcode("rra", "zx", 2, 6, False, True),
    Opcode("sei", "I", 1, 2, False, False), Opcode("adc", "ay", 3, 4, True, False),
    Opcode("nop", "I", 1, 2, False, True), Opcode("rra", "ay", 3, 7, False, True),
    Opcode("nop", "ax", 3, 4, True, True), Opcode("adc", "ax", 3, 4, True, False),
    Opcode("ror", "ax", 3, 7, False, False), Opcode("rra", "ax", 3, 7, False, True),
    # $80
    Opcode("nop", "#", 2, 2, False, True), Opcode("sta", "ix", 2, 6, False, False),
    Opcode("nop", "#", 2, 2, False, True), Opcode("sax", "ix", 2, 6, False, True),
    Opcode("sty", "z", 2, 3, False, False), Opcode("sta", "z", 2, 3, False, False),
    Opcode("stx", "z", 2, 3, False, False), Opcode("sax", "z", 2, 3, False, True),
    Opcode("dey", "I", 1, 2, False, False), Opcode("nop", "#", 2, 2, False, True),
    Opcode("txa", "I", 1, 2, False, False), Opcode("ane", "#", 2, 2, False, True),
    Opcode("sty", "a", 3, 4, False, False), Opcode("sta", "a", 3, 4, False, False),
    Opcode("stx", "a", 3, 4, False, False), Opcode("sax", "a", 3, 4, False, True),
    # $90
    Opcode("bcc", "r", 2, 2, True, False), Opcode("sta", "iy", 2, 6, False, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("sha", "ax", 3, 5, False, True),
    Opcode("sty", "zx", 2, 4, False, False), Opcode("sta", "zx", 2, 4, False, False),
    Opcode("stx", "zy", 2, 4, False, False), Opcode("sax", "zy", 2, 4, False, True),
    Opcode("tya", "I", 1, 2, False, False), Opcode("sta", "ay", 3, 5, False, False),
    Opcode("txs", "I", 1, 2, False, False), Opcode("shs", "ax", 3, 5, False, True),
    Opcode("shy", "ay", 3, 5, False, True), Opcode("sta", "ax", 3, 5, False, False),
    Opcode("shx", "ay", 3, 5, False, True), Opcode("sha", "ay", 3, 5, False, True),
    # $A0
    Opcode("ldy", "#", 2, 2, False, False), Opcode("lda", "ix", 2, 6, False, False),
    Opcode("ldx", "#", 2, 2, False, False), Opcode("lax", "ix", 2, 6, False, True),
    Opcode("ldy", "z", 2, 3, False, False), Opcode("lda", "z", 2, 3, False, False),
    Opcode("ldx", "z", 2, 3, False, False), Opcode("lax", "z", 2, 3, False, True),
    Opcode("tay", "I", 1, 2, False, False), Opcode("lda", "#", 2, 2, False, False),
    Opcode("tax", "I", 1, 2, False, False), Opcode("lxa", "#", 2, 2, False, True),
    Opcode("ldy", "a", 3, 4, False, False), Opcode("lda", "a", 3, 4, False, False),
    Opcode("ldx", "a", 3, 4, False, False), Opcode("lax", "a", 3, 4, False, True),
    # $B0
    Opcode("bcs", "r", 2, 2, True, False), Opcode("lda", "iy", 2, 5, True, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("lax", "iy", 2, 5, True, True),
    Opcode("ldy", "zx", 2, 4, False, False), Opcode("lda", "zx", 2, 4, False, False),
    Opcode("ldx", "zy", 2, 4, False, False), Opcode("lax", "zy", 2, 4, False, True),
    Opcode("clv", "I", 1, 2, False, False), Opcode("lda", "ay", 3, 4, True, False),
    Opcode("tsx", "I", 1, 2, False, False), Opcode("lae", "ay", 3, 4, True, True),
    Opcode("ldy", "ax", 3, 4, False, False), Opcode("lda", "ax", 3, 4, True, False),
    Opcode("ldx", "ay", 3, 4, True, False), Opcode("lax", "ay", 3, 4, True, True),
    # $C0
    Opcode("cpy", "#", 2, 2, False, False), Opcode("cmp", "ix", 2, 6, False, False),
    Opcode("nop", "#", 2, 2, False, True), Opcode("dcp", "ix", 2, 8, False, True),
    Opcode("cpy", "z", 2, 3, False, False), Opcode("cmp", "z", 2, 3, False, False),
    Opcode("dec", "z", 2, 5, False, False), Opcode("dcp", "z", 2, 5, False, True),
    Opcode("iny", "I", 1, 2, False, False), Opcode("cmp", "#", 2, 2, False, False),
    Opcode("dex", "I", 1, 2, False, False), Opcode("sbx", "#", 2, 2, False, True),
    Opcode("cpy", "a", 3, 4, False, False), Opcode("cmp", "a", 3, 4, False, False),
    Opcode("dec", "a", 3, 4, False, False), Opcode("dcp", "a", 3, 6, False, True),
    # $D0
    Opcode("bne", "r", 2, 2, True, False), Opcode("cmp", "iy", 2, 5, True, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("dcp", "iy", 2, 8, False, True),
    Opcode("nop", "zx", 2, 4, False, True), Opcode("cmp", "zx", 2, 4, False, False),
    Opcode("dec", "zx", 2, 6, False, False), Opcode("dcp", "zx", 2, 6, False, True),
    Opcode("cld", "I", 1, 2, False, False), Opcode("cmp", "ay", 3, 4, True, False),
    Opcode("nop", "I", 1, 2, False, True), Opcode("dcp", "ay", 3, 7, False, True),
    Opcode("nop", "ax", 3, 4, True, True), Opcode("cmp", "ax", 3, 4, True, False),
    Opcode("dec", "ax", 3, 7, False, False), Opcode("dcp", "ax", 3, 7, False, True),
    # $E0
    Opcode("cpx", "#", 2, 2, False, False), Opcode("sbc", "ix", 2, 6, False, False),
    Opcode("nop", "#", 2, 2, False, True), Opcode("isb", "ix", 2, 8, False, True),
    Opcode("cpx", "z", 2, 3, False, False), Opcode("sbc", "z", 2, 3, False, False),
    Opcode("inc", "z", 2, 5, False, False), Opcode("isb", "z", 2, 5, False, True),
    Opcode("inx", "I", 1, 2, False, False), Opcode("sbc", "#", 2, 2, False, False),
    Opcode("nop", "I", 1, 2, False, False), Opcode("sbc", "#", 2, 2, False, True),
    Opcode("cpx", "a", 3, 4, False, False), Opcode("sbc", "a", 3, 4, False, False),
    Opcode("inc", "a", 3, 6, False, False), Opcode("isb", "a", 3, 6, False, True),
    # $F0
    Opcode("beq", "r", 2, 2, True, False), Opcode("sbc", "iy", 2, 5, True, False),
    Opcode("jam", "I", 1, 0, False, True), Opcode("isb", "iy", 2, 8, False, True),
    Opcode("nop", "zx", 2, 4, False, True), Opcode("sbc", "zx", 2, 4, False, False),
    Opcode("inc", "zx", 2, 6, False, False), Opcode("isb", "zx", 2, 6, False, True),
    Opcode("sed", "I", 1, 2, False, False), Opcode("sbc", "ay", 3, 4, True, False),
    Opcode("nop", "I", 1, 2, False, True), Opcode("isb", "ay", 3, 7, False, True),
    Opcode("nop", "ax", 3, 4, True, True), Opcode("sbc", "ax", 3, 4, True, False),
    Opcode("inc", "ax", 3, 7, False, False), Opcode("isb", "ax", 3, 7, False, True),
)
# fmt: on


class AsmInstr:
    """One decoded 6502 instruction at a fixed address."""

    def __init__(self, addr, data):
        self.addr = addr
        self.bytes = bytes(data)
        self.opcode = opcodes[data[0]]
        if len(data) == 3:
            # 16-bit little-endian operand
            self.operand = to_word(data[1:])
        elif len(data) == 2:
            self.operand = self.bytes[1]
            if self.opcode.mode == "r":
                # sign-extend the relative offset
                if self.operand & 0x80:
                    self.operand = (self.operand & 0x7F) - 0x80
                # BUG FIX: relative branches are counted from the address of the
                # *next* instruction (addr + 2), not from the branch itself.
                self.operand += self.addr + 2
        else:
            # one-byte instruction: no operand
            self.operand = None

    def format(self, symbols=None, lower=True, addr=True, bytes=True):
        """Render the instruction as text.

        symbols: optional {address: name} map substituted for operand values.
        lower:   emit lower-case mnemonics and hex digits when True.
        addr:    prefix the instruction address when True.
        bytes:   prefix the raw instruction bytes when True.
        """
        if self.operand is not None:
            if symbols and self.operand in symbols:
                sym = symbols[self.operand]
            else:
                if self.opcode.length == 2:
                    if self.opcode.mode == "r":
                        # branch targets are full addresses: 4 hex digits
                        sym = f"${self.operand:04x}"
                    else:
                        sym = f"${self.operand:02x}"
                elif self.opcode.length == 3:
                    sym = f"${self.operand:04x}"
                if not lower:
                    sym = sym.upper()
        else:
            sym = ""
        nem = self.opcode.mnemonic
        fmt = operand_formats[self.opcode.mode]
        if not lower:
            nem = nem.upper()
            fmt = fmt.upper()
        text = ""
        if addr:
            text += f"{self.addr:04x} "
        if bytes:
            text += f"{self.bytes.hex(' '): <9} "
        text += f"{nem} {fmt.format(sym)}"
        return text

    def __str__(self):
        return self.format()

    def __repr__(self):
        classname = self.__class__.__name__
        return f"{classname}(0x{self.addr:04x}, {self.bytes})"

    def __lt__(self, other):
        # blocks sort by address (see State.insert_block)
        return self.addr < other.addr


class BasicLine:
    """One tokenized CBM BASIC line: link word, line number, token bytes."""

    def __init__(self, addr, bytes):
        self.addr = addr
        self.bytes = bytes
        self.link = to_word(bytes)        # address of the next BASIC line
        self.lineno = to_word(bytes[2:])  # BASIC line number

    def format(self, lower=False):
        """Detokenize the line back to readable BASIC text."""
        line = [f"{self.lineno} "]
        for byte in self.bytes[4:-1]:
            if byte >= 0x80 and byte <= 0xCA:
                token = basic_tokens[byte - 0x80]
                line.append(token.lower() if lower else token)
            elif byte == 0xFF:
                line.append(basic_tokens[-1])  # pi
            else:
                line.append(petscii.to_unicode(bytes([byte]), lower))
        return "".join(line)

    def syscalls(self):
        """Return the decimal operands of every SYS (token $9E) on this line."""
        addrs = []
        tok = self.bytes[4:]
        try:
            while True:
                beg = tok.index(0x9E) + 1
                while tok[beg] == 0x20:  # skip spaces after SYS
                    beg += 1
                end = beg
                while tok[end] >= 0x30 and tok[end] <= 0x39:  # decimal digits
                    end += 1
                addrs.append(int(tok[beg:end]))
                tok = tok[end:]
        except (ValueError, IndexError):
            # ValueError: no further SYS token / no digits after it;
            # IndexError: scanned past the end of the line. Both end the loop.
            pass
        return addrs

    def __str__(self):
        return self.format()

    def __repr__(self):
        classname = self.__class__.__name__
        return f"{classname}(0x{self.addr:04x}, {self.bytes})"

    def __lt__(self, other):
        return self.addr < other.addr


class Data:
    """An uninterpreted run of raw bytes at a fixed address."""

    def __init__(self, addr, bytes):
        self.addr = addr
        self.bytes = bytes

    def format(self):
        """Hex-dump the bytes, 16 per line, each line prefixed by its address."""
        lines = []
        # BUG FIX: the original called len(0, self.data) (len takes a single
        # argument) and read the nonexistent attribute self.data; the method
        # always raised. Iterate over self.bytes in 16-byte rows instead.
        for i in range(0, len(self.bytes), 16):
            lines.append(f"{self.addr + i} {self.bytes[i:i + 16].hex(' ')}")
        return "\n".join(lines)

    def __str__(self):
        return self.format()

    def __repr__(self):
        classname = self.__class__.__name__
        return f"{classname}(0x{self.addr:04x}, {self.bytes})"

    def __lt__(self, other):
        return self.addr < other.addr


# mnemonics whose target should be queued for tracing (conditional branches
# and jsr: execution can continue both at the target and after the instruction)
branchops = {"bvs", "bcs", "beq", "bmi", "bcc", "bne", "bpl", "bvc", "jsr"}


class State:
    """A 64 KiB memory image plus the blocks recovered from it."""

    def __init__(self):
        self.mem = bytearray(64 * 1024)  # full address space
        self.claimed = set()             # addresses already disassembled as code
        self.blocks = []                 # BasicLine/AsmInstr/Data, sorted by addr

    def load_prg(self, data):
        """Copy a PRG image (2-byte little-endian load address header) into memory."""
        load = to_word(data)
        data = data[2:]
        self.mem[load:load + len(data)] = data
        if load == 0x801:
            # standard BASIC start address: detokenize the program
            self.parse_basic(0x801)

    def parse_basic(self, addr):
        """Walk the BASIC line links from addr, inserting a BasicLine per line."""
        prev = None
        while True:
            it = iter(self.mem[addr:])
            try:
                line = []
                for _ in range(4):  # link word + line number
                    line.append(next(it))
                link = to_word(line)
                if link == 0:  # a null link terminates the program
                    break
                b = 0xFF
                while b != 0:  # line text is zero-terminated
                    b = next(it)
                    line.append(b)
                cur = BasicLine(addr, bytes(line))
                self.insert_block(cur)
                if prev is not None:
                    prev.next = cur
                # BUG FIX: the original never advanced prev, so the .next
                # chain between lines was never built.
                prev = cur
                addr = link
            except StopIteration:
                break

    def trace_asm(self, start):
        """Trace reachable code from start, claiming bytes and inserting AsmInstr.

        Tracing stops at rts/brk, undocumented opcodes, or already-claimed
        bytes; conditional-branch and jsr targets are queued and traced too.
        """
        heads = set()  # pending branch/jsr targets
        pc = start     # renamed from `next`, which shadowed the builtin
        while True:
            op = opcodes[self.mem[pc]]
            instr = AsmInstr(pc, self.mem[pc:pc + op.length])
            pc += op.length
            instaddrs = set(range(instr.addr, pc))
            if (
                op.mnemonic in ("rts", "brk")
                or op.undocumented
                or self.claimed & instaddrs
            ):
                # dead end: resume at a queued target, if any remain
                if heads:
                    pc = heads.pop()
                else:
                    break
            else:
                self.claimed.update(instaddrs)
                self.insert_block(instr)
                if op.mnemonic == "jmp":
                    if op.mode == "a":
                        pc = instr.operand
                    elif op.mode == "i":
                        # BUG FIX: read the two-byte vector; the original passed
                        # a single int (self.mem[x]) to to_word(), which expects
                        # a byte sequence everywhere else in this module.
                        pc = to_word(self.mem[instr.operand:instr.operand + 2])
                elif op.mnemonic in branchops:
                    heads.add(instr.operand)

    def insert_block(self, block):
        """Insert keeping self.blocks sorted by address (via the blocks' __lt__)."""
        bisect.insort_right(self.blocks, block)

    def __str__(self):
        return "\n".join(str(b) for b in self.blocks)